Posted to commits@hive.apache.org by se...@apache.org on 2018/06/18 22:02:52 UTC
[01/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline) [Forced Update!]
Repository: hive
Updated Branches:
refs/heads/master-txnstats be3039587 -> 1d46608e8 (forced update)
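
The change repeated throughout the .q.out diffs below is that ORC-backed map work now reports DECIMAL_64 in both inputFormatFeatureSupport and featureSupportInUse, i.e. decimals of precision 18 or less are read in the scaled-long Decimal64 representation instead of the slower HiveDecimal path, and the LLAP-specific "DECIMAL_64 disabled because LLAP is enabled" reason disappears. As a minimal sketch of how to observe this (the table name and query are illustrative assumptions, not taken from this commit), an EXPLAIN VECTORIZATION DETAIL plan over an ORC table should show the same two lines:

-- Illustrative session only; the table and query are assumptions, not part of this commit.
SET hive.vectorized.execution.enabled=true;
SET hive.vectorized.input.format.supports.enabled=decimal_64;

CREATE TABLE decimal64_demo (key decimal(15,3), value int) STORED AS ORC;

EXPLAIN VECTORIZATION DETAIL
SELECT key + key FROM decimal64_demo;

-- Expected under "Map Vectorization:" (as in the diffs below):
--   inputFormatFeatureSupport: [DECIMAL_64]
--   featureSupportInUse: [DECIMAL_64]
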
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
index 688d0ed..84f9573 100644
--- a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out
@@ -284,8 +284,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -572,8 +572,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -864,8 +864,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1134,8 +1134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1258,8 +1258,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
index 3a1c0e7..51af71a 100644
--- a/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out
@@ -111,8 +111,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
index e9a0e45..3b775a1 100644
--- a/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_mapjoin2.q.out
@@ -132,8 +132,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_mapjoin3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin3.q.out b/ql/src/test/results/clientpositive/vectorized_mapjoin3.q.out
index fb7198d..094c3ce 100644
--- a/ql/src/test/results/clientpositive/vectorized_mapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_mapjoin3.q.out
@@ -151,8 +151,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -325,8 +325,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -499,8 +499,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out
index aea8f0a..9b96477 100644
--- a/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_math_funcs.q.out
@@ -149,8 +149,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
index 77a91b9..5b48222 100644
--- a/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
@@ -75,8 +75,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
index 7e03bf3..f845873 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp.q.out
@@ -153,8 +153,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -255,8 +255,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -340,8 +340,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -447,8 +447,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
index fb49a9b..72e9916 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
@@ -279,8 +279,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -472,8 +472,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -649,8 +649,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -826,8 +826,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -952,8 +952,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1059,8 +1059,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1179,8 +1179,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out
index f12a2b6..95f07d9 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out
@@ -81,8 +81,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -241,8 +241,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
[22/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
index deb9f67..1ef50ca 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -226,8 +226,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -343,8 +343,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -460,8 +460,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -577,8 +577,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -694,8 +694,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -811,8 +811,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -928,8 +928,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1045,8 +1045,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1168,8 +1168,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1269,8 +1269,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1386,8 +1386,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1503,8 +1503,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1620,8 +1620,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1744,8 +1744,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1863,8 +1863,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1972,8 +1972,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2074,8 +2074,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2191,8 +2191,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2327,8 +2327,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2507,8 +2507,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2781,8 +2781,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2898,8 +2898,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3015,8 +3015,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -3132,8 +3132,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3268,8 +3268,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3432,8 +3432,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3675,8 +3675,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3806,8 +3806,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3937,8 +3937,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4061,7 +4061,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key + key) (type: decimal(16,3))
outputColumnNames: _col0
@@ -4069,7 +4069,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [3]
- selectExpressions: DecimalColAddDecimalColumn(col 0:decimal(15,3), col 0:decimal(15,3)) -> 3:decimal(16,3)
+ selectExpressions: Decimal64ColAddDecimal64Column(col 0:decimal(15,3)/DECIMAL_64, col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(16,3)/DECIMAL_64
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4087,8 +4087,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4096,9 +4095,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(16,3)]
+ scratchColumnTypeNames: [decimal(16,3)/DECIMAL_64]
Stage: Stage-0
Fetch Operator
@@ -4179,15 +4178,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key + CAST( value AS decimal(10,0))) (type: decimal(16,3))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DecimalColAddDecimalColumn(col 0:decimal(15,3), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(16,3)
+ projectedOutputColumnNums: [5]
+ selectExpressions: DecimalColAddDecimalColumn(col 3:decimal(15,3), col 4:decimal(10,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3), CastLongToDecimal(col 1:int) -> 4:decimal(10,0)) -> 5:decimal(16,3)
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4205,8 +4204,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4214,9 +4212,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(10,0), decimal(16,3)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(10,0), decimal(16,3)]
Stage: Stage-0
Fetch Operator
@@ -4297,15 +4295,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (UDFToDouble(key) + (UDFToDouble(value) / 2.0D)) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DoubleColAddDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: DoubleColAddDoubleColumn(col 4:double, col 6:double)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double, DoubleColDivideDoubleScalar(col 5:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 5:double) -> 6:double) -> 5:double
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4323,8 +4321,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4332,9 +4329,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double, double]
Stage: Stage-0
Fetch Operator
@@ -4415,15 +4412,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (UDFToDouble(key) + 1.0D) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DoubleColAddDoubleScalar(col 3:double, val 1.0)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: DoubleColAddDoubleScalar(col 4:double, val 1.0)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double) -> 5:double
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4441,8 +4438,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4450,9 +4446,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double]
Stage: Stage-0
Fetch Operator
@@ -4533,7 +4529,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key - key) (type: decimal(16,3))
outputColumnNames: _col0
@@ -4541,7 +4537,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [3]
- selectExpressions: DecimalColSubtractDecimalColumn(col 0:decimal(15,3), col 0:decimal(15,3)) -> 3:decimal(16,3)
+ selectExpressions: Decimal64ColSubtractDecimal64Column(col 0:decimal(15,3)/DECIMAL_64, col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(16,3)/DECIMAL_64
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4559,8 +4555,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4568,9 +4563,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(16,3)]
+ scratchColumnTypeNames: [decimal(16,3)/DECIMAL_64]
Stage: Stage-0
Fetch Operator
@@ -4651,15 +4646,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key - CAST( value AS decimal(10,0))) (type: decimal(16,3))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DecimalColSubtractDecimalColumn(col 0:decimal(15,3), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(16,3)
+ projectedOutputColumnNums: [5]
+ selectExpressions: DecimalColSubtractDecimalColumn(col 3:decimal(15,3), col 4:decimal(10,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3), CastLongToDecimal(col 1:int) -> 4:decimal(10,0)) -> 5:decimal(16,3)
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4677,8 +4672,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4686,9 +4680,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(10,0), decimal(16,3)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(10,0), decimal(16,3)]
Stage: Stage-0
Fetch Operator
@@ -4769,15 +4763,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (UDFToDouble(key) - (UDFToDouble(value) / 2.0D)) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DoubleColSubtractDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: DoubleColSubtractDoubleColumn(col 4:double, col 6:double)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double, DoubleColDivideDoubleScalar(col 5:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 5:double) -> 6:double) -> 5:double
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4795,8 +4789,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4804,9 +4797,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double, double]
Stage: Stage-0
Fetch Operator
@@ -4887,15 +4880,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (UDFToDouble(key) - 1.0D) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DoubleColSubtractDoubleScalar(col 3:double, val 1.0)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: DoubleColSubtractDoubleScalar(col 4:double, val 1.0)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double) -> 5:double
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -4913,8 +4906,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4922,9 +4914,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double]
Stage: Stage-0
Fetch Operator
@@ -5005,15 +4997,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key * key) (type: decimal(31,6))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: DecimalColMultiplyDecimalColumn(col 0:decimal(15,3), col 0:decimal(15,3)) -> 3:decimal(31,6)
+ projectedOutputColumnNums: [5]
+ selectExpressions: DecimalColMultiplyDecimalColumn(col 3:decimal(15,3), col 4:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3), ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 4:decimal(15,3)) -> 5:decimal(31,6)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5031,8 +5023,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5040,9 +5031,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(31,6)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(15,3), decimal(31,6)]
Stage: Stage-0
Fetch Operator
@@ -5123,12 +5114,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColGreaterDecimalScalar(col 4:decimal(26,3), val 0)(children: DecimalColMultiplyDecimalColumn(col 0:decimal(15,3), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(26,3))
+ predicateExpression: FilterDecimalColGreaterDecimalScalar(col 5:decimal(26,3), val 0)(children: DecimalColMultiplyDecimalColumn(col 3:decimal(15,3), col 4:decimal(10,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3), CastLongToDecimal(col 1:int) -> 4:decimal(10,0)) -> 5:decimal(26,3))
predicate: ((key * CAST( value AS decimal(10,0))) > 0) (type: boolean)
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -5155,8 +5146,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5164,9 +5154,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(10,0), decimal(26,3)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(10,0), decimal(26,3)]
Stage: Stage-0
Fetch Operator
@@ -5231,15 +5221,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key * CAST( value AS decimal(10,0))) (type: decimal(26,3))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DecimalColMultiplyDecimalColumn(col 0:decimal(15,3), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(26,3)
+ projectedOutputColumnNums: [5]
+ selectExpressions: DecimalColMultiplyDecimalColumn(col 3:decimal(15,3), col 4:decimal(10,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3), CastLongToDecimal(col 1:int) -> 4:decimal(10,0)) -> 5:decimal(26,3)
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5257,8 +5247,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5266,9 +5255,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(10,0), decimal(26,3)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(10,0), decimal(26,3)]
Stage: Stage-0
Fetch Operator
@@ -5349,15 +5338,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (UDFToDouble(key) * (UDFToDouble(value) / 2.0D)) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DoubleColMultiplyDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: DoubleColMultiplyDoubleColumn(col 4:double, col 6:double)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double, DoubleColDivideDoubleScalar(col 5:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 5:double) -> 6:double) -> 5:double
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5375,8 +5364,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5384,9 +5372,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double, double]
Stage: Stage-0
Fetch Operator
@@ -5467,15 +5455,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (UDFToDouble(key) * 2.0D) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DoubleColMultiplyDoubleScalar(col 3:double, val 2.0)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: DoubleColMultiplyDoubleScalar(col 4:double, val 2.0)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double) -> 5:double
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5493,8 +5481,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5502,9 +5489,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double]
Stage: Stage-0
Fetch Operator
@@ -5585,15 +5572,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key / 0) (type: decimal(18,6))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: DecimalColDivideDecimalScalar(col 0:decimal(15,3), val 0) -> 3:decimal(18,6)
+ projectedOutputColumnNums: [4]
+ selectExpressions: DecimalColDivideDecimalScalar(col 3:decimal(15,3), val 0)(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:decimal(18,6)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5611,8 +5598,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5620,9 +5606,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(18,6)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(18,6)]
Stage: Stage-0
Fetch Operator
@@ -5703,12 +5689,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColNotEqualDecimalScalar(col 0:decimal(15,3), val 0)
+ predicateExpression: FilterDecimal64ColNotEqualDecimal64Scalar(col 0:decimal(15,3)/DECIMAL_64, val 0)
predicate: (key <> 0) (type: boolean)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -5717,8 +5703,8 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: DecimalColDivideDecimalColumn(col 0:decimal(15,3), col 0:decimal(15,3)) -> 3:decimal(34,19)
+ projectedOutputColumnNums: [5]
+ selectExpressions: DecimalColDivideDecimalColumn(col 3:decimal(15,3), col 4:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3), ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 4:decimal(15,3)) -> 5:decimal(34,19)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5736,8 +5722,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5745,9 +5730,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(34,19)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(15,3), decimal(34,19)]
Stage: Stage-0
Fetch Operator
@@ -5823,7 +5808,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -5837,8 +5822,8 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DecimalColDivideDecimalColumn(col 0:decimal(15,3), col 3:decimal(10,0))(children: CastLongToDecimal(col 1:int) -> 3:decimal(10,0)) -> 4:decimal(26,14)
+ projectedOutputColumnNums: [5]
+ selectExpressions: DecimalColDivideDecimalColumn(col 3:decimal(15,3), col 4:decimal(10,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3), CastLongToDecimal(col 1:int) -> 4:decimal(10,0)) -> 5:decimal(26,14)
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5856,8 +5841,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5865,9 +5849,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(10,0), decimal(26,14)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(10,0), decimal(26,14)]
Stage: Stage-0
Fetch Operator
@@ -5933,7 +5917,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -5947,8 +5931,8 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: DoubleColDivideDoubleColumn(col 3:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double, DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 4:double) -> 5:double) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: DoubleColDivideDoubleColumn(col 4:double, col 6:double)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double, DoubleColDivideDoubleScalar(col 5:double, val 2.0)(children: CastLongToDouble(col 1:int) -> 5:double) -> 6:double) -> 5:double
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -5966,8 +5950,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5975,9 +5958,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double, double]
Stage: Stage-0
Fetch Operator
@@ -6043,15 +6026,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (1.0D + (UDFToDouble(key) / 2.0D)) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: DoubleScalarAddDoubleColumn(val 1.0, col 4:double)(children: DoubleColDivideDoubleScalar(col 3:double, val 2.0)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double) -> 4:double) -> 3:double
+ projectedOutputColumnNums: [4]
+ selectExpressions: DoubleScalarAddDoubleColumn(val 1.0, col 5:double)(children: DoubleColDivideDoubleScalar(col 4:double, val 2.0)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double) -> 5:double) -> 4:double
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -6069,8 +6052,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6078,9 +6060,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double]
Stage: Stage-0
Fetch Operator
@@ -6161,15 +6143,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: abs(key) (type: decimal(15,3))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: FuncAbsDecimalToDecimal(col 0:decimal(15,3)) -> 3:decimal(15,3)
+ projectedOutputColumnNums: [4]
+ selectExpressions: FuncAbsDecimalToDecimal(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:decimal(15,3)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -6187,8 +6169,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6196,9 +6177,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(15,3)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(15,3)]
Stage: Stage-0
Fetch Operator
@@ -6283,7 +6264,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: decimal(15,3)), value (type: int)
outputColumnNames: key, value
@@ -6295,7 +6276,7 @@ STAGE PLANS:
Group By Operator
aggregations: sum(key), count(key)
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 0:decimal(15,3)) -> decimal(25,3), VectorUDAFCount(col 0:decimal(15,3)) -> bigint
+ aggregators: VectorUDAFSumDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> decimal(25,3), VectorUDAFCount(col 0:decimal(15,3)/DECIMAL_64) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 1:int
@@ -6324,8 +6305,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6333,7 +6313,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -6479,15 +6459,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (- key) (type: decimal(15,3))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: FuncNegateDecimalToDecimal(col 0:decimal(15,3)) -> 3:decimal(15,3)
+ projectedOutputColumnNums: [4]
+ selectExpressions: FuncNegateDecimalToDecimal(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:decimal(15,3)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -6505,8 +6485,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6514,9 +6493,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(15,3)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(15,3)]
Stage: Stage-0
Fetch Operator
@@ -6754,15 +6733,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: floor(key) (type: decimal(13,0))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: FuncFloorDecimalToDecimal(col 0:decimal(15,3)) -> 3:decimal(13,0)
+ projectedOutputColumnNums: [4]
+ selectExpressions: FuncFloorDecimalToDecimal(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:decimal(13,0)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -6780,8 +6759,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6789,9 +6767,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(13,0)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(13,0)]
Stage: Stage-0
Fetch Operator
@@ -6872,15 +6850,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: round(key, 2) (type: decimal(15,2))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(15,3), decimalPlaces 2) -> 3:decimal(15,2)
+ projectedOutputColumnNums: [4]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 3:decimal(15,3), decimalPlaces 2)(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:decimal(15,2)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -6898,8 +6876,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6907,9 +6884,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(15,2)]
+ scratchColumnTypeNames: [decimal(15,3), decimal(15,2)]
Stage: Stage-0
Fetch Operator
@@ -6990,7 +6967,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: power(key, 2) (type: double)
outputColumnNames: _col0
@@ -7016,8 +6993,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -7025,7 +7001,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [double]
@@ -7108,15 +7084,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: ((key + 1) % (key / 2)) (type: decimal(18,6))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [5]
- selectExpressions: DecimalColModuloDecimalColumn(col 3:decimal(16,3), col 4:decimal(18,6))(children: DecimalColAddDecimalScalar(col 0:decimal(15,3), val 1) -> 3:decimal(16,3), DecimalColDivideDecimalScalar(col 0:decimal(15,3), val 2) -> 4:decimal(18,6)) -> 5:decimal(18,6)
+ projectedOutputColumnNums: [6]
+ selectExpressions: DecimalColModuloDecimalColumn(col 7:decimal(16,3), col 5:decimal(18,6))(children: ConvertDecimal64ToDecimal(col 3:decimal(16,3)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 0:decimal(15,3)/DECIMAL_64, decimal64Val 1000, decimalVal 1) -> 3:decimal(16,3)/DECIMAL_64) -> 7:decimal(16,3), DecimalColDivideDecimalScalar(col 4:decimal(15,3), val 2)(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 4:decimal(15,3)) -> 5:decimal(18,6)) -> 6:decimal(18,6)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -7134,8 +7110,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7143,9 +7118,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(16,3), decimal(18,6), decimal(18,6)]
+ scratchColumnTypeNames: [decimal(16,3)/DECIMAL_64, decimal(15,3), decimal(18,6), decimal(18,6), decimal(16,3)]
Stage: Stage-0
Fetch Operator
@@ -7229,20 +7204,20 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: value (type: int), key (type: decimal(15,3)), UDFToDouble(key) (type: double), (UDFToDouble(key) * UDFToDouble(key)) (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [1, 0, 3, 6]
- selectExpressions: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double, DoubleColMultiplyDoubleColumn(col 4:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 4:double, CastDecimalToDouble(col 0:decimal(15,3)) -> 5:double) -> 6:double
+ projectedOutputColumnNums: [1, 0, 4, 7]
+ selectExpressions: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double, DoubleColMultiplyDoubleColumn(col 5:double, col 6:double)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 5:double, CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 6:double) -> 7:double
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(_col3), sum(_col2), count(_col1)
Group By Vectorization:
- aggregators: VectorUDAFSumDouble(col 6:double) -> double, VectorUDAFSumDouble(col 3:double) -> double, VectorUDAFCount(col 0:decimal(15,3)) -> bigint
+ aggregators: VectorUDAFSumDouble(col 7:double) -> double, VectorUDAFSumDouble(col 4:double) -> double, VectorUDAFCount(col 0:decimal(15,3)/DECIMAL_64) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 1:int
@@ -7271,8 +7246,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7280,9 +7254,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(15,3), value:int
+ dataColumns: key:decimal(15,3)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double, double, double]
+ scratchColumnTypeNames: [decimal(15,3), double, double, double, double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -7394,20 +7368,20 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(15,3), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(15,3)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: value (type: int), key (type: decimal(15,3)), UDFToDouble(key) (type: double), (UDFToDouble(key) * UDFToDouble(key)) (type: double)
outputColumnNames: _col0, _col1, _col2, _col3
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [1, 0, 3, 6]
- selectExpressions: CastDecimalToDouble(col 0:decimal(15,3)) -> 3:double, DoubleColMultiplyDoubleColumn(col 4:double, col 5:double)(children: CastDecimalToDouble(col 0:decimal(15,3)) -> 4:double, CastDecimalToDouble(col 0:decimal(15,3)) -> 5:double) -> 6:double
+ projectedOutputColumnNums: [1, 0, 4, 7]
+ selectExpressions: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 4:double, DoubleColMultiplyDoubleColumn(col 5:double, col 6:double)(children: CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 5:double, CastDecimalToDouble(col 3:decimal(15,3))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,3)/DECIMAL_64) -> 3:decimal(15,3)) -> 6:double) -> 7:double
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(_col3), sum(_col2), count(_col1)
Group By Vectorization:
- aggregators: VectorUDAFSumDouble(col 6:double) -> double, VectorUDAFSumDouble(col 3:double) -> double, VectorUDAFCount(col 0:decimal(15,3)) -> bigint
+ aggregators: VectorUDAFSumDouble(col 7:double) -> double, VectorUDAFSumDouble(col 4:double) -> double, VectorUDAFCount(col 0:decimal(15,3)/DECIMAL_64) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 1:int
@@ -7436,8 +7410,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse:
<TRUNCATED>
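A pattern worth noting in the plan diffs above: once the reader advertises DECIMAL_64, the key column is carried as decimal(15,3)/DECIMAL_64 (an unscaled long; the Decimal64ColAddDecimal64Scalar entry shows the literal 1 as decimal64Val 1000 at scale 3), and any expression without a Decimal64 variant first receives a ConvertDecimal64ToDecimal child plus one extra decimal scratch column. Below is a minimal, self-contained sketch of that widening step, assuming only the scaled-long representation; the class and method names are illustrative and are not Hive's actual ConvertDecimal64ToDecimal implementation.
// Sketch: widen a DECIMAL_64 (unscaled long) value to a full decimal,
// the way the ConvertDecimal64ToDecimal nodes in the plans above do
// before handing the value to a non-Decimal64 expression.
import java.math.BigDecimal;
public class Decimal64WidenSketch {
    // decimal(15,3) stored as an unscaled long, e.g. 12.345 -> 12345
    static BigDecimal widen(long decimal64Value, int scale) {
        return BigDecimal.valueOf(decimal64Value, scale);
    }
    public static void main(String[] args) {
        System.out.println(widen(12345L, 3)); // 12.345
        System.out.println(widen(1000L, 3));  // 1.000, the decimal64Val form of the literal 1
    }
}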
[45/67] [abbrv] hive git commit: HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)
Posted by se...@apache.org.
HIVE-19923: Follow up of HIVE-19615, use UnaryFunction instead of prefix (Slim Bouguerra, reviewed by Nishant Bangarwa, Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3a6ad266
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3a6ad266
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3a6ad266
Branch: refs/heads/master-txnstats
Commit: 3a6ad2661e5fdd3e6ce8b8f7ee5a35ddb3bd2c47
Parents: 6a16a71
Author: Slim Bouguerra <sl...@gmail.com>
Authored: Mon Jun 18 07:54:44 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Mon Jun 18 07:54:44 2018 -0700
----------------------------------------------------------------------
.../ql/parse/DruidSqlOperatorConverter.java | 35 ++++++++++++++++++--
.../clientpositive/druid/druidmini_test1.q.out | 2 +-
2 files changed, 34 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/3a6ad266/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
index 4db0714..6aa98c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DruidSqlOperatorConverter.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.parse;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import org.apache.calcite.adapter.druid.DirectOperatorConversion;
import org.apache.calcite.adapter.druid.DruidExpressions;
@@ -51,6 +52,7 @@ import org.joda.time.Period;
import javax.annotation.Nullable;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.TimeZone;
@@ -87,9 +89,9 @@ public class DruidSqlOperatorConverter {
druidOperatorMap
.put(SqlStdOperatorTable.SUBSTRING, new DruidSqlOperatorConverter.DruidSubstringOperatorConversion());
druidOperatorMap
- .put(SqlStdOperatorTable.IS_NULL, new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
+ .put(SqlStdOperatorTable.IS_NULL, new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
druidOperatorMap.put(SqlStdOperatorTable.IS_NOT_NULL,
- new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, "notnull")
+ new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, "notnull")
);
druidOperatorMap.put(HiveTruncSqlOperator.INSTANCE, new DruidDateTruncOperatorConversion());
druidOperatorMap.put(HiveToDateSqlOperator.INSTANCE, new DruidToDateOperatorConversion());
@@ -346,4 +348,33 @@ public class DruidSqlOperatorConverter {
);
}
+ public static class UnaryFunctionOperatorConversion implements org.apache.calcite.adapter.druid.DruidSqlOperatorConverter {
+
+ private final SqlOperator operator;
+ private final String druidOperator;
+
+ public UnaryFunctionOperatorConversion(SqlOperator operator, String druidOperator) {
+ this.operator = operator;
+ this.druidOperator = druidOperator;
+ }
+
+ @Override public SqlOperator calciteOperator() {
+ return operator;
+ }
+
+ @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+ DruidQuery druidQuery) {
+ final RexCall call = (RexCall) rexNode;
+
+ final List<String> druidExpressions = DruidExpressions.toDruidExpressions(
+ druidQuery, rowType,
+ call.getOperands());
+
+ if (druidExpressions == null) {
+ return null;
+ }
+
+ return DruidQuery.format("%s(%s)", druidOperator, Iterables.getOnlyElement(druidExpressions));
+ }
+ }
}
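The effect of swapping UnarySuffixOperatorConversion for UnaryFunctionOperatorConversion is that IS_NULL and IS_NOT_NULL are rendered as Druid function calls rather than suffix operators, which is exactly the change in the druid.query.json output in the q.out diff below. A minimal sketch of the two renderings, using plain String.format in place of the DruidQuery.format call from the patch:
// Sketch: old suffix-style rendering vs. the new unary-function rendering.
public class UnaryRenderingSketch {
    public static void main(String[] args) {
        String operand = "\"cstring1\"";
        String suffixStyle = String.format("(%s %s)", operand, "isnull");   // old: ("cstring1" isnull)
        String functionStyle = String.format("%s(%s)", "isnull", operand);  // new: isnull("cstring1")
        System.out.println(suffixStyle);
        System.out.println(functionStyle);
    }
}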
http://git-wip-us.apache.org/repos/asf/hive/blob/3a6ad266/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
index 89da36a..4e078aa 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_test1.q.out
@@ -814,7 +814,7 @@ STAGE PLANS:
properties:
druid.fieldNames vc,vc0
druid.fieldTypes boolean,boolean
- druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n3","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"(\"cstring1\" isnull)","outputType":"FLOAT"},{"type":"expression","name":"vc0","expression":"(\"cint\" notnull)","outputType":"FLOAT"}],"columns":["vc","vc0"],"resultFormat":"compactedList"}
+ druid.query.json {"queryType":"scan","dataSource":"default.druid_table_n3","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"isnull(\"cstring1\")","outputType":"FLOAT"},{"type":"expression","name":"vc0","expression":"notnull(\"cint\")","outputType":"FLOAT"}],"columns":["vc","vc0"],"resultFormat":"compactedList"}
druid.query.type scan
Select Operator
expressions: vc (type: boolean), vc0 (type: boolean)
[41/67] [abbrv] hive git commit: HIVE-19898: Disable TransactionalValidationListener when the table is not in the Hive catalog (Jason Dere, reviewed by Eugene Koifman)
Posted by se...@apache.org.
HIVE-19898: Disable TransactionalValidationListener when the table is not in the Hive catalog (Jason Dere, reviewed by Eugene Koifman)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ebd2c5f8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ebd2c5f8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ebd2c5f8
Branch: refs/heads/master-txnstats
Commit: ebd2c5f8a82b35eabca146520ffcd87605084618
Parents: 766c3dc
Author: Jason Dere <jd...@hortonworks.com>
Authored: Sun Jun 17 21:53:23 2018 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Sun Jun 17 21:53:23 2018 -0700
----------------------------------------------------------------------
.../TestTransactionalValidationListener.java | 127 +++++++++++++++++++
.../TransactionalValidationListener.java | 23 +++-
.../metastore/client/MetaStoreClientTest.java | 2 +-
3 files changed, 146 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/ebd2c5f8/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
new file mode 100644
index 0000000..3aaad22
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTransactionalValidationListener.java
@@ -0,0 +1,127 @@
+package org.apache.hadoop.hive.metastore;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.MetaStoreClientTest;
+import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@RunWith(Parameterized.class)
+public class TestTransactionalValidationListener extends MetaStoreClientTest {
+
+ private AbstractMetaStoreService metaStore;
+ private IMetaStoreClient client;
+ private boolean createdCatalogs = false;
+
+ @BeforeClass
+ public static void startMetaStores() {
+ Map<MetastoreConf.ConfVars, String> msConf = new HashMap<MetastoreConf.ConfVars, String>();
+
+ // Enable TransactionalValidationListener + create.as.acid
+ Map<String, String> extraConf = new HashMap<>();
+ extraConf.put("metastore.create.as.acid", "true");
+ extraConf.put("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
+ extraConf.put("hive.support.concurrency", "true");
+ startMetaStores(msConf, extraConf);
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ // Get new client
+ client = metaStore.getClient();
+ if (!createdCatalogs) {
+ createCatalogs();
+ createdCatalogs = true;
+ }
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ try {
+ if (client != null) {
+ client.close();
+ }
+ } finally {
+ client = null;
+ }
+ }
+
+ public TestTransactionalValidationListener(String name, AbstractMetaStoreService metaStore) throws Exception {
+ this.metaStore = metaStore;
+ }
+
+ private void createCatalogs() throws Exception {
+ String[] catNames = {"spark", "myapp"};
+ String[] location = {MetaStoreTestUtils.getTestWarehouseDir("spark"),
+ MetaStoreTestUtils.getTestWarehouseDir("myapp")};
+
+ for (int i = 0; i < catNames.length; i++) {
+ Catalog cat = new CatalogBuilder()
+ .setName(catNames[i])
+ .setLocation(location[i])
+ .build();
+ client.createCatalog(cat);
+ File dir = new File(cat.getLocationUri());
+ Assert.assertTrue(dir.exists() && dir.isDirectory());
+ }
+ }
+
+ private Table createOrcTable(String catalog) throws Exception {
+ Table table = new Table();
+ StorageDescriptor sd = new StorageDescriptor();
+ List<FieldSchema> cols = new ArrayList<>();
+
+ table.setDbName("default");
+ table.setTableName("test_table");
+ cols.add(new FieldSchema("column_name", "int", null));
+ sd.setCols(cols);
+ sd.setSerdeInfo(new SerDeInfo());
+ sd.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
+ sd.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
+ table.setSd(sd);
+ table.setCatName(catalog);
+ table.setTableType("MANAGED_TABLE");
+
+ client.createTable(table);
+ Table createdTable = client.getTable(catalog, table.getDbName(), table.getTableName());
+ return createdTable;
+ }
+
+ @Test
+ public void testCreateAsAcid() throws Exception {
+ // Table created in hive catalog should have been automatically set to transactional
+ Table createdTable = createOrcTable("hive");
+ assertTrue(AcidUtils.isTransactionalTable(createdTable));
+
+ // Non-hive catalogs should not be transactional
+ createdTable = createOrcTable("spark");
+ assertFalse(AcidUtils.isTransactionalTable(createdTable));
+
+ createdTable = createOrcTable("myapp");
+ assertFalse(AcidUtils.isTransactionalTable(createdTable));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/ebd2c5f8/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index 76069bb..33cf542 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -53,8 +53,11 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
public static final String DEFAULT_TRANSACTIONAL_PROPERTY = "default";
public static final String INSERTONLY_TRANSACTIONAL_PROPERTY = "insert_only";
+ private final Set<String> supportedCatalogs = new HashSet<String>();
+
TransactionalValidationListener(Configuration conf) {
super(conf);
+ supportedCatalogs.add("hive");
}
@Override
@@ -73,11 +76,21 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
}
private void handle(PreAlterTableEvent context) throws MetaException {
- handleAlterTableTransactionalProp(context);
+ if (supportedCatalogs.contains(getTableCatalog(context.getNewTable()))) {
+ handleAlterTableTransactionalProp(context);
+ }
}
private void handle(PreCreateTableEvent context) throws MetaException {
- handleCreateTableTransactionalProp(context);
+ if (supportedCatalogs.contains(getTableCatalog(context.getTable()))) {
+ handleCreateTableTransactionalProp(context);
+ }
+ }
+
+ private String getTableCatalog(Table table) {
+ String catName = table.isSetCatName() ? table.getCatName() :
+ MetaStoreUtils.getDefaultCatalog(getConf());
+ return catName.toLowerCase();
}
/**
@@ -230,7 +243,8 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
newTable.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL));
return;
}
- Configuration conf = MetastoreConf.newMetastoreConf();
+
+ Configuration conf = getConf();
boolean makeAcid =
//no point making an acid table if these other props are not set since it will just throw
//exceptions when someone tries to use the table.
@@ -437,8 +451,7 @@ public final class TransactionalValidationListener extends MetaStorePreEventList
try {
Warehouse wh = hmsHandler.getWh();
if (table.getSd().getLocation() == null || table.getSd().getLocation().isEmpty()) {
- String catName = table.isSetCatName() ? table.getCatName() :
- MetaStoreUtils.getDefaultCatalog(getConf());
+ String catName = getTableCatalog(table);
tablePath = wh.getDefaultTablePath(hmsHandler.getMS().getDatabase(
catName, table.getDbName()), table);
} else {
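The core of this change is the catalog gate: getTableCatalog falls back to the default catalog when the table has none set, lower-cases the name, and the transactional handling runs only when that name is in supportedCatalogs (which contains just "hive"), so the tables created in the "spark" and "myapp" catalogs in the new test above stay non-transactional even with metastore.create.as.acid=true. A minimal standalone sketch of that decision, with illustrative names (only the logic mirrors the diff):
// Sketch: decide whether transactional validation applies, based on the table's catalog.
import java.util.Set;
public class CatalogGateSketch {
    private static final Set<String> SUPPORTED_CATALOGS = Set.of("hive");
    // catName may be null when the client did not set a catalog on the table
    static boolean shouldValidate(String catName, String defaultCatalog) {
        String effective = (catName != null ? catName : defaultCatalog).toLowerCase();
        return SUPPORTED_CATALOGS.contains(effective);
    }
    public static void main(String[] args) {
        System.out.println(shouldValidate(null, "hive"));    // true  -> handled as before
        System.out.println(shouldValidate("spark", "hive")); // false -> validation skipped
    }
}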
http://git-wip-us.apache.org/repos/asf/hive/blob/ebd2c5f8/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientTest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientTest.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientTest.java
index a0e9d32..dc48fa8 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientTest.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreClientTest.java
@@ -67,7 +67,7 @@ public abstract class MetaStoreClientTest {
* @param msConf Specific MetaStore configuration values
* @param extraConf Specific other configuration values
*/
- static void startMetaStores(Map<MetastoreConf.ConfVars, String> msConf,
+ public static void startMetaStores(Map<MetastoreConf.ConfVars, String> msConf,
Map<String, String> extraConf) {
for(AbstractMetaStoreService metaStoreService : metaStoreServices) {
try {
[66/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/results/clientpositive/stats_nonpart.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_nonpart.q.out b/ql/src/test/results/clientpositive/stats_nonpart.q.out
new file mode 100644
index 0000000..cded846
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_nonpart.q.out
@@ -0,0 +1,500 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int,key int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int,key int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: insert into mysource values (100,30), (101,50), (102,60)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,30), (101,50), (102,60)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: drop table if exists stats_nonpartitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_nonpartitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_nonpartitioned
+PREHOOK: query: explain select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_nonpartitioned
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+0
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: mysource
+ Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (p = 100) (type: boolean)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 100 (type: int), key (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+ Write Type: INSERT
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int)
+ outputColumnNames: key, value
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+ Write Type: INSERT
+
+ Stage: Stage-2
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: int, int
+ Table: default.stats_nonpartitioned
+
+PREHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE []
+POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 720
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_nonpartitioned
+ Statistics: Num rows: 2 Data size: 7200 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 2 Data size: 7200 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
+PREHOOK: query: explain select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_nonpartitioned
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
+PREHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@stats_nonpartitioned
+PREHOOK: Output: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats_nonpartitioned compute statistics for columns key, value
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: Output: default@stats_nonpartitioned
+#### A masked pattern was here ####
+PREHOOK: query: explain select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_nonpartitioned
+ Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
+PREHOOK: query: explain select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_nonpartitioned
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_nonpartitioned
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_nonpartitioned
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_nonpartitioned
+#### A masked pattern was here ####
+2
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/results/clientpositive/stats_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_part.q.out b/ql/src/test/results/clientpositive/stats_part.q.out
new file mode 100644
index 0000000..8760dad
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_part.q.out
@@ -0,0 +1,660 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int, key int, value int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int, key int, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20,201), (101,40,401), (102,50,501)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,21,211), (101,41,411), (102,51,511)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: drop table if exists stats_partitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_partitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_part
+POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_part
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Filter Operator
+ predicate: (p > 100) (type: boolean)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=100
+POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=100
+POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=100).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=101
+POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=101).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 3
+ numPartitions 3
+ numRows 6
+ rawDataSize 0
+ totalSize 2241
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value EXPRESSION [(mysource)mysource.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 0
+ totalSize 2994
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: show partitions stats_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: show partitions stats_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@stats_part
+p=100
+p=101
+p=102
+PREHOOK: query: explain select count(*) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+51
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 0
+ totalSize 2994
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+51
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 0
+ totalSize 2994
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+2
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+51
+PREHOOK: query: describe extended stats_part partition (p=101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part partition (p=101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
+PREHOOK: query: describe extended stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
[67/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
HIVE-19532 : 04 patch (Steve Yeom)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1d46608e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1d46608e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1d46608e
Branch: refs/heads/master-txnstats
Commit: 1d46608e89c26ed123e96c3b79ef59b50d2349a6
Parents: 1a610cc
Author: sergey <se...@apache.org>
Authored: Mon Jun 18 14:50:31 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Mon Jun 18 14:51:14 2018 -0700
----------------------------------------------------------------------
.../listener/DummyRawStoreFailEvent.java | 45 +-
pom.xml | 2 +-
.../hive/ql/exec/ColumnStatsUpdateTask.java | 3 +
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 3 +-
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 110 +-
.../hadoop/hive/ql/lockmgr/DbTxnManager.java | 7 +
.../hadoop/hive/ql/lockmgr/DummyTxnManager.java | 1 +
.../hadoop/hive/ql/lockmgr/HiveTxnManager.java | 1 -
.../apache/hadoop/hive/ql/metadata/Hive.java | 297 +-
.../hive/ql/optimizer/StatsOptimizer.java | 56 +-
.../hive/ql/stats/BasicStatsNoJobTask.java | 4 +-
.../hadoop/hive/ql/stats/BasicStatsTask.java | 15 +-
.../hadoop/hive/ql/stats/ColStatsProcessor.java | 7 +
.../test/queries/clientpositive/stats_nonpart.q | 53 +
ql/src/test/queries/clientpositive/stats_part.q | 98 +
.../test/queries/clientpositive/stats_part2.q | 100 +
.../test/queries/clientpositive/stats_sizebug.q | 37 +
.../results/clientpositive/stats_nonpart.q.out | 500 ++
.../results/clientpositive/stats_part.q.out | 660 ++
.../results/clientpositive/stats_part2.q.out | 1261 ++++
.../results/clientpositive/stats_sizebug.q.out | 216 +
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2426 ++++---
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 70 +-
.../ThriftHiveMetastore_server.skeleton.cpp | 2 +-
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 6383 ++++++++++--------
.../gen/thrift/gen-cpp/hive_metastore_types.h | 412 +-
.../metastore/api/AddPartitionsRequest.java | 215 +-
.../hive/metastore/api/AddPartitionsResult.java | 126 +-
.../hadoop/hive/metastore/api/AggrStats.java | 124 +-
.../metastore/api/AlterPartitionsRequest.java | 966 +++
.../metastore/api/AlterPartitionsResponse.java | 283 +
.../hive/metastore/api/ColumnStatistics.java | 335 +-
.../hive/metastore/api/GetTableRequest.java | 219 +-
.../hive/metastore/api/GetTableResult.java | 124 +-
.../metastore/api/IsolationLevelCompliance.java | 48 +
.../hadoop/hive/metastore/api/Partition.java | 333 +-
.../hive/metastore/api/PartitionSpec.java | 337 +-
.../metastore/api/PartitionsStatsRequest.java | 219 +-
.../metastore/api/PartitionsStatsResult.java | 124 +-
.../api/SetPartitionsStatsRequest.java | 215 +-
.../apache/hadoop/hive/metastore/api/Table.java | 333 +-
.../hive/metastore/api/TableStatsRequest.java | 219 +-
.../hive/metastore/api/TableStatsResult.java | 124 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 2553 ++++---
.../gen-php/metastore/ThriftHiveMetastore.php | 1231 ++--
.../src/gen/thrift/gen-php/metastore/Types.php | 905 +++
.../hive_metastore/ThriftHiveMetastore-remote | 8 +-
.../hive_metastore/ThriftHiveMetastore.py | 834 ++-
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 590 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 162 +-
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 27 +-
.../hadoop/hive/metastore/AlterHandler.java | 2 +-
.../hadoop/hive/metastore/HiveAlterHandler.java | 20 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 108 +-
.../hive/metastore/HiveMetaStoreClient.java | 118 +-
.../hadoop/hive/metastore/IHMSHandler.java | 5 +
.../hadoop/hive/metastore/IMetaStoreClient.java | 45 +-
.../hadoop/hive/metastore/ObjectStore.java | 487 +-
.../apache/hadoop/hive/metastore/RawStore.java | 150 +-
.../hive/metastore/cache/CachedStore.java | 140 +-
.../hadoop/hive/metastore/model/MPartition.java | 18 +-
.../model/MPartitionColumnStatistics.java | 9 +
.../hadoop/hive/metastore/model/MTable.java | 19 +
.../metastore/model/MTableColumnStatistics.java | 9 +
.../metastore/txn/CompactionTxnHandler.java | 66 +-
.../hadoop/hive/metastore/txn/TxnDbUtil.java | 94 +
.../hadoop/hive/metastore/txn/TxnUtils.java | 20 +-
.../src/main/resources/package.jdo | 18 +
.../main/sql/derby/hive-schema-3.0.0.derby.sql | 11 +-
.../main/sql/derby/hive-schema-4.0.0.derby.sql | 10 +-
.../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql | 8 +-
.../main/sql/mssql/hive-schema-3.0.0.mssql.sql | 14 +-
.../main/sql/mssql/hive-schema-4.0.0.mssql.sql | 14 +-
.../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql | 8 +
.../main/sql/mysql/hive-schema-3.0.0.mysql.sql | 6 +
.../main/sql/mysql/hive-schema-4.0.0.mysql.sql | 6 +
.../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql | 2 +-
.../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql | 8 +
.../sql/oracle/hive-schema-3.0.0.oracle.sql | 15 +-
.../sql/oracle/hive-schema-4.0.0.oracle.sql | 14 +-
.../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql | 2 +-
.../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql | 7 +
.../sql/postgres/hive-schema-3.0.0.postgres.sql | 19 +-
.../sql/postgres/hive-schema-4.0.0.postgres.sql | 14 +-
.../upgrade-3.1.0-to-4.0.0.postgres.sql | 8 +
.../src/main/thrift/hive_metastore.thrift | 76 +-
.../DummyRawStoreControlledCommit.java | 104 +-
.../DummyRawStoreForJdoConnection.java | 99 +-
.../HiveMetaStoreClientPreCatalog.java | 108 +-
.../metastore/client/TestAlterPartitions.java | 3 +-
.../hadoop/hive/common/ValidTxnWriteIdList.java | 4 +
.../org/apache/hive/common/util/TxnIdUtils.java | 20 +-
92 files changed, 18056 insertions(+), 7275 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 8f9a03f..498b2c6 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -266,6 +266,12 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String writeIdList) throws MetaException {
+ return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+ }
+
+ @Override
public boolean addPartition(Partition part)
throws InvalidObjectException, MetaException {
return objectStore.addPartition(part);
@@ -278,6 +284,13 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public Partition getPartition(String catName, String dbName, String tableName,
+ List<String> partVals, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+ }
+
+ @Override
public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
throws MetaException, NoSuchObjectException,
InvalidObjectException, InvalidInputException {
@@ -376,9 +389,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbName, String tblName,
- List<List<String>> partValsList, List<Partition> newParts)
+ List<List<String>> partValsList, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
- objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+ objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
}
@Override
@@ -685,6 +699,14 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+ List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -778,6 +800,17 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+ String tblName, List<String> colNames,
+ List<String> partNames,
+ long txnId,
+ String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionColumnStatistics(
+ catName, dbName, tblName , colNames, partNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
List<FieldSchema> partKeys, List<String> partVals)
throws MetaException, NoSuchObjectException {
@@ -855,6 +888,14 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
+ public AggrStats get_aggr_stats_for(String catName, String dbName,
+ String tblName, List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException {
+ return null;
+ }
+
+ @Override
public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
return objectStore.getNextNotification(rqst);
}
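The overloads above all follow the same pattern: the existing RawStore calls gain a txnId and a writeIdList argument and delegate to the wrapped ObjectStore. As a minimal caller-side sketch (not part of the diff), modeled on the Hive.getTable(dbName, tableName, throwException, checkTransactional) change later in this commit, this is roughly how those two arguments are produced; conf, dbName, tableName and the metastore client accessor getMSC() are assumed to be in scope, with an open session transaction:

    // Sketch only: derive the caller's transactional snapshot and pass it to the
    // metastore so it can judge whether stored table/column stats are still valid
    // for this reader. Mirrors the Hive.getTable() change further down in this patch.
    long txnId = SessionState.get().getTxnMgr() != null
        ? SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
    ValidWriteIdList writeIds = (txnId > 0)
        ? AcidUtils.getTableValidWriteIdListWithTxnList(conf, dbName, tableName) : null;
    org.apache.hadoop.hive.metastore.api.Table tTable = getMSC().getTable(
        dbName, tableName, txnId, writeIds != null ? writeIds.toString() : null);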
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 5202248..4278104 100644
--- a/pom.xml
+++ b/pom.xml
@@ -66,7 +66,7 @@
</modules>
<properties>
- <hive.version.shortname>3.1.0</hive.version.shortname>
+ <hive.version.shortname>4.0.0</hive.version.shortname>
<!-- Build Properties -->
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
index a53ff5a..7795c66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
@@ -46,11 +46,14 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 8e32b02..ec32edf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1301,8 +1301,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
throw new AssertionError("Unsupported alter materialized view type! : " + alterMVDesc.getOp());
}
- db.alterTable(mv, environmentContext);
-
+ db.alterTable(mv,environmentContext);
return 0;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 7fce67f..7d35084 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -33,6 +33,7 @@ import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
+import org.apache.avro.generic.GenericData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,13 +41,11 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
-import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.common.*;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
+import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -57,9 +56,12 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.Writer;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.apache.hadoop.hive.shims.ShimLoader;
@@ -1621,6 +1623,102 @@ public class AcidUtils {
}
}
+ public static class TableSnapshot {
+ private long txnId;
+ private String validWriteIdList;
+
+ public TableSnapshot() {
+ }
+
+ public TableSnapshot(long txnId, String validWriteIdList) {
+ this.txnId = txnId;
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public String getValidWriteIdList() {
+ return validWriteIdList;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+ }
+
+ /**
+ * Create a TableSnapshot with the given "conf"
+ * for the table of the given "tbl".
+ *
+ * @param conf
+ * @param tbl
+ * @return TableSnapshot on success, null on failure
+ * @throws LockException
+ */
+ public static TableSnapshot getTableSnapshot(
+ Configuration conf,
+ Table tbl) throws LockException {
+ if (!isTransactionalTable(tbl)) {
+ return null;
+ } else {
+ long txnId = 0;
+ ValidWriteIdList validWriteIdList = null;
+
+ HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr();
+
+ if (sessionTxnMgr != null) {
+ txnId = sessionTxnMgr.getCurrentTxnId();
+ }
+ String fullTableName = getFullTableName(tbl.getDbName(), tbl.getTableName());
+ if (txnId > 0 && isTransactionalTable(tbl)) {
+ validWriteIdList =
+ getTableValidWriteIdList(conf, fullTableName);
+
+ if (validWriteIdList == null) {
+ validWriteIdList = getTableValidWriteIdListWithTxnList(
+ conf, tbl.getDbName(), tbl.getTableName());
+ }
+ }
+ return new TableSnapshot(txnId,
+ validWriteIdList != null ? validWriteIdList.toString() : null);
+ }
+ }
+
+ /**
+ * Returns ValidWriteIdList for the table with the given "dbName" and "tableName".
+ * This is called when HiveConf has no list for the table.
+ * Otherwise use getTableSnapshot().
+ * @param conf Configuration
+ * @param dbName
+ * @param tableName
+ * @return ValidWriteIdList on success, null on failure to get a list.
+ * @throws LockException
+ */
+ public static ValidWriteIdList getTableValidWriteIdListWithTxnList(
+ Configuration conf, String dbName, String tableName) throws LockException {
+ HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr();
+ if (sessionTxnMgr == null) {
+ return null;
+ }
+ ValidWriteIdList validWriteIdList = null;
+ ValidTxnWriteIdList validTxnWriteIdList = null;
+
+ String validTxnList = conf.get(ValidTxnList.VALID_TXNS_KEY);
+ List<String> tablesInput = new ArrayList<>();
+ String fullTableName = getFullTableName(dbName, tableName);
+ tablesInput.add(fullTableName);
+
+ validTxnWriteIdList = sessionTxnMgr.getValidWriteIds(tablesInput, validTxnList);
+ return validTxnWriteIdList != null ?
+ validTxnWriteIdList.getTableValidWriteIdList(fullTableName) : null;
+ }
+
public static String getFullTableName(String dbName, String tableName) {
return dbName.toLowerCase() + "." + tableName.toLowerCase();
}
@@ -1908,8 +2006,8 @@ public class AcidUtils {
}
public static boolean isAcidEnabled(HiveConf hiveConf) {
- String txnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER);
- boolean concurrency = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
+ String txnMgr = hiveConf.getVar(ConfVars.HIVE_TXN_MANAGER);
+ boolean concurrency = hiveConf.getBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY);
String dbTxnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";
if (txnMgr.equals(dbTxnMgr) && concurrency) {
return true;
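As a usage sketch (not part of the diff), the snapshot built by getTableSnapshot() is stamped onto metastore objects before they are written back, so the metastore can record which write IDs the new stats are valid for. The fragment below is modeled on the Hive.createPartition() change later in this commit; conf, tbl, partSpec and the metastore client accessor getMSC() are assumed to be in scope:

    // Sketch only: attach the caller's transactional snapshot to a new partition
    // before handing it to the metastore, as Hive.createPartition() does later in this patch.
    org.apache.hadoop.hive.metastore.api.Partition part =
        Partition.createMetaPartitionObject(tbl, partSpec, null);
    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
    part.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
    part.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
    return new Partition(tbl, getMSC().add_partition(part));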
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 4fd1d4e..9104786 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -1017,9 +1017,16 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
@Override
public long getTableWriteId(String dbName, String tableName) throws LockException {
assert isTxnOpen();
+ return getTableWriteId(dbName, tableName, true);
+ }
+
+ private long getTableWriteId(
+ String dbName, String tableName, boolean allocateIfNotYet) throws LockException {
String fullTableName = AcidUtils.getFullTableName(dbName, tableName);
if (tableWriteIds.containsKey(fullTableName)) {
return tableWriteIds.get(fullTableName);
+ } else if (!allocateIfNotYet) {
+ return 0;
}
try {
long writeId = getMS().allocateTableWriteId(txnId, dbName, tableName);
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
index ab9d67e..78bb303 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
@@ -77,6 +77,7 @@ class DummyTxnManager extends HiveTxnManagerImpl {
public long getTableWriteId(String dbName, String tableName) throws LockException {
return 0L;
}
+
@Override
public void replAllocateTableWriteIdsBatch(String dbName, String tableName, String replPolicy,
List<TxnToWriteId> srcTxnToWriteIdList) throws LockException {
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index 5f68e08..9ea40f4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -276,7 +276,6 @@ public interface HiveTxnManager {
* if {@code isTxnOpen()}, returns the table write ID associated with current active transaction.
*/
long getTableWriteId(String dbName, String tableName) throws LockException;
-
/**
* Allocates write id for each transaction in the list.
* @param dbName database name
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 2ec131e..3918e62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -63,21 +63,13 @@ import javax.jdo.JDODataStoreException;
import com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptMaterialization;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.RelVisitor;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.tools.RelBuilder;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
@@ -87,13 +79,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.HiveStatsUtils;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.common.*;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
import org.apache.hadoop.hive.common.log.InPlaceUpdate;
@@ -114,60 +100,7 @@ import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CompactionResponse;
-import org.apache.hadoop.hive.metastore.api.CompactionType;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FireEventRequest;
-import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
-import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
-import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
@@ -180,7 +113,6 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.log.PerfLogger;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule;
import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
@@ -202,7 +134,6 @@ import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hive.common.util.TxnIdUtils;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -652,6 +583,12 @@ public class Hive {
alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext);
}
+
+ public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
+ throws HiveException {
+ alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
+ }
+
/**
* Updates the existing table metadata with the new metadata.
*
@@ -659,13 +596,17 @@ public class Hive {
* name of the existing table
* @param newTbl
* new name of the table. could be the old name
+ * @param transactional
+ * whether to generate and save a table snapshot into the metastore
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
* @throws TException
*/
- public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
+ public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext,
+ boolean transactional)
throws HiveException {
- alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
+ String[] names = Utilities.getDbTableName(fullyQlfdTblName);
+ alterTable(names[0], names[1], newTbl, false, environmentContext, transactional);
}
public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext)
@@ -673,9 +614,13 @@ public class Hive {
String[] names = Utilities.getDbTableName(fullyQlfdTblName);
alterTable(names[0], names[1], newTbl, cascade, environmentContext);
}
-
public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
- EnvironmentContext environmentContext)
+ EnvironmentContext environmentContext)
+ throws HiveException {
+ alterTable(dbName, tblName, newTbl, cascade, environmentContext, true);
+ }
+ public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
+ EnvironmentContext environmentContext, boolean transactional)
throws HiveException {
try {
@@ -690,6 +635,12 @@ public class Hive {
if (cascade) {
environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
}
+
+ // Take a table snapshot and set it to newTbl.
+ if (transactional) {
+ setTableSnapshotForTransactionalTable(conf, newTbl);
+ }
+
getMSC().alter_table_with_environmentContext(dbName, tblName, newTbl.getTTable(), environmentContext);
} catch (MetaException e) {
throw new HiveException("Unable to alter table. " + e.getMessage(), e);
@@ -739,6 +690,29 @@ public class Hive {
*/
public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
throws InvalidOperationException, HiveException {
+ alterPartition(dbName, tblName, newPart, environmentContext, true);
+ }
+
+ /**
+ * Updates the existing partition metadata with the new metadata.
+ *
+ * @param dbName
+ * name of the existing table's database
+ * @param tblName
+ * name of the existing table
+ * @param newPart
+ * new partition
+ * @param environmentContext
+ * environment context for the method
+ * @param transactional
+ * indicates this call is for transaction stats
+ * @throws InvalidOperationException
+ * if the changes in metadata are not acceptable
+ * @throws TException
+ */
+ public void alterPartition(String dbName, String tblName, Partition newPart,
+ EnvironmentContext environmentContext, boolean transactional)
+ throws InvalidOperationException, HiveException {
try {
validatePartition(newPart);
String location = newPart.getLocation();
@@ -746,6 +720,9 @@ public class Hive {
location = Utilities.getQualifiedPath(conf, new Path(location));
newPart.setLocation(location);
}
+ if (transactional) {
+ setTableSnapshotForTransactionalPartition(conf, newPart);
+ }
getSynchronizedMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext);
} catch (MetaException e) {
@@ -763,6 +740,10 @@ public class Hive {
newPart.checkValidity();
}
+ public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+ throws InvalidOperationException, HiveException {
+ alterPartitions(tblName, newParts, environmentContext, false);
+ }
/**
* Updates the existing table metadata with the new metadata.
*
@@ -770,16 +751,23 @@ public class Hive {
* name of the existing table
* @param newParts
* new partitions
+ * @param transactional
+ * whether to generate and save a table snapshot into the metastore
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
* @throws TException
*/
- public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+ public void alterPartitions(String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext, boolean transactional)
throws InvalidOperationException, HiveException {
String[] names = Utilities.getDbTableName(tblName);
List<org.apache.hadoop.hive.metastore.api.Partition> newTParts =
new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>();
try {
+ AcidUtils.TableSnapshot tableSnapshot = null;
+ if (transactional) {
+ tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable());
+ }
// Remove the DDL time so that it gets refreshed
for (Partition tmpPart: newParts) {
if (tmpPart.getParameters() != null) {
@@ -792,7 +780,9 @@ public class Hive {
}
newTParts.add(tmpPart.getTPartition());
}
- getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext);
+ getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext,
+ tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
} catch (MetaException e) {
throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
} catch (TException e) {
@@ -923,6 +913,8 @@ public class Hive {
tTbl.setPrivileges(principalPrivs);
}
}
+ // Set table snapshot to api.Table to make it persistent.
+ setTableSnapshotForTransactionalTable(conf, tbl);
if (primaryKeys == null && foreignKeys == null
&& uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null
&& checkConstraints == null) {
@@ -1125,7 +1117,27 @@ public class Hive {
* @throws HiveException
*/
public Table getTable(final String dbName, final String tableName,
- boolean throwException) throws HiveException {
+ boolean throwException) throws HiveException {
+ return this.getTable(dbName, tableName, throwException, false);
+ }
+
+ /**
+ * Returns metadata of the table
+ *
+ * @param dbName
+ * the name of the database
+ * @param tableName
+ * the name of the table
+ * @param throwException
+ * controls whether an exception is thrown or a null is returned
+ * @param checkTransactional
+ * checks whether the metadata table stats are valid for (i.e., compliant
+ * with the snapshot isolation of) the current transaction.
+ * @return the table, or null if throwException is false.
+ * @throws HiveException
+ */
+ public Table getTable(final String dbName, final String tableName,
+ boolean throwException, boolean checkTransactional) throws HiveException {
if (tableName == null || tableName.equals("")) {
throw new HiveException("empty table creation??");
@@ -1134,7 +1146,19 @@ public class Hive {
// Get the table from metastore
org.apache.hadoop.hive.metastore.api.Table tTable = null;
try {
- tTable = getMSC().getTable(dbName, tableName);
+ if (checkTransactional) {
+ ValidWriteIdList validWriteIdList = null;
+ long txnId = SessionState.get().getTxnMgr() != null ?
+ SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
+ if (txnId > 0) {
+ validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf,
+ dbName, tableName);
+ }
+ tTable = getMSC().getTable(dbName, tableName, txnId,
+ validWriteIdList != null ? validWriteIdList.toString() : null);
+ } else {
+ tTable = getMSC().getTable(dbName, tableName);
+ }
} catch (NoSuchObjectException e) {
if (throwException) {
LOG.error("Table " + dbName + "." + tableName + " not found: " + e.getMessage());
@@ -1791,6 +1815,7 @@ public class Hive {
Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
validatePartition(newTPart);
+ setTableSnapshotForTransactionalPartition(conf, newTPart);
// Generate an insert event only if inserting into an existing partition
// When inserting into a new partition, the add partition event takes care of insert event
@@ -2424,8 +2449,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
*/
public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException {
try {
- return new Partition(tbl, getMSC().add_partition(
- Partition.createMetaPartitionObject(tbl, partSpec, null)));
+ org.apache.hadoop.hive.metastore.api.Partition part =
+ Partition.createMetaPartitionObject(tbl, partSpec, null);
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(conf, tbl);
+ part.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
+ part.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ return new Partition(tbl, getMSC().add_partition(part));
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
throw new HiveException(e);
@@ -2437,8 +2467,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
int size = addPartitionDesc.getPartitionCount();
List<org.apache.hadoop.hive.metastore.api.Partition> in =
new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(conf, tbl);
for (int i = 0; i < size; ++i) {
- in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf));
+ org.apache.hadoop.hive.metastore.api.Partition tmpPart =
+ convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf);
+ if (tmpPart != null && tableSnapshot != null && tableSnapshot.getTxnId() > 0) {
+ tmpPart.setTxnId(tableSnapshot.getTxnId());
+ tmpPart.setValidWriteIdList(tableSnapshot.getValidWriteIdList());
+ }
+ in.add(tmpPart);
}
List<Partition> out = new ArrayList<Partition>();
try {
@@ -2633,7 +2671,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
fullName = tbl.getFullyQualifiedName();
}
- alterPartition(fullName, new Partition(tbl, tpart), null);
+ Partition newPart = new Partition(tbl, tpart);
+ alterPartition(fullName, newPart, null);
}
private void alterPartitionSpecInMemory(Table tbl,
@@ -4359,8 +4398,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
}
- public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException {
+ public boolean setPartitionColumnStatistics(
+ SetPartitionsStatsRequest request) throws HiveException {
try {
+ ColumnStatistics colStat = request.getColStats().get(0);
+ ColumnStatisticsDesc statsDesc = colStat.getStatsDesc();
+ Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName());
+
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ request.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
+ request.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
return getMSC().setPartitionColumnStatistics(request);
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
@@ -4370,8 +4417,27 @@ private void constructOneLBLocationMap(FileStatus fSta,
public List<ColumnStatisticsObj> getTableColumnStatistics(
String dbName, String tableName, List<String> colNames) throws HiveException {
+ return getTableColumnStatistics(dbName, tableName, colNames, false);
+ }
+
+ public List<ColumnStatisticsObj> getTableColumnStatistics(
+ String dbName, String tableName, List<String> colNames, boolean checkTransactional)
+ throws HiveException {
+
+ List<ColumnStatisticsObj> retv = null;
try {
- return getMSC().getTableColumnStatistics(dbName, tableName, colNames);
+ if (checkTransactional) {
+ Table tbl = getTable(dbName, tableName);
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ if (tableSnapshot != null && tableSnapshot.getTxnId() > 0) {
+ retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames,
+ tableSnapshot.getTxnId(),
+ tableSnapshot.getValidWriteIdList());
+ }
+ } else {
+ retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames);
+ }
+ return retv;
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
throw new HiveException(e);
@@ -4380,8 +4446,25 @@ private void constructOneLBLocationMap(FileStatus fSta,
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
String tableName, List<String> partNames, List<String> colNames) throws HiveException {
- try {
- return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames);
+ return getPartitionColumnStatistics(dbName, tableName, partNames, colNames, false);
+ }
+
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String dbName, String tableName, List<String> partNames, List<String> colNames,
+ boolean checkTransactional)
+ throws HiveException {
+ long txnId = -1;
+ String writeIdList = null;
+ try {
+ if (checkTransactional) {
+ Table tbl = getTable(dbName, tableName);
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
+ writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
+ }
+
+ return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames,
+ txnId, writeIdList);
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
throw new HiveException(e);
@@ -4390,8 +4473,22 @@ private void constructOneLBLocationMap(FileStatus fSta,
public AggrStats getAggrColStatsFor(String dbName, String tblName,
List<String> colNames, List<String> partName) {
- try {
- return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName);
+ return getAggrColStatsFor(dbName, tblName, colNames, partName, false);
+ }
+
+ public AggrStats getAggrColStatsFor(String dbName, String tblName,
+ List<String> colNames, List<String> partName, boolean checkTransactional) {
+ long txnId = -1;
+ String writeIdList = null;
+ try {
+ if (checkTransactional) {
+ Table tbl = getTable(dbName, tblName);
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
+ txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
+ writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
+ }
+ return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName,
+ txnId, writeIdList);
} catch (Exception e) {
LOG.debug(StringUtils.stringifyException(e));
return new AggrStats(new ArrayList<ColumnStatisticsObj>(),0);
@@ -5189,4 +5286,26 @@ private void constructOneLBLocationMap(FileStatus fSta,
throw new HiveException(e);
}
}
+
+ private void setTableSnapshotForTransactionalTable(
+ HiveConf conf, Table newTbl)
+ throws LockException {
+
+ org.apache.hadoop.hive.metastore.api.Table newTTbl = newTbl.getTTable();
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, newTbl);
+
+ newTTbl.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1);
+ newTTbl.setValidWriteIdList(
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ }
+
+ private void setTableSnapshotForTransactionalPartition(HiveConf conf, Partition partition)
+ throws LockException {
+
+ AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, partition.getTable());
+ org.apache.hadoop.hive.metastore.api.Partition tpartition = partition.getTPartition();
+ tpartition.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1);
+ tpartition.setValidWriteIdList(
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ }
}
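The Hive.java changes above all follow one pattern: resolve an AcidUtils.TableSnapshot for the table, then forward its txnId and validWriteIdList with the metastore call so that stats reads and writes are tied to the caller's snapshot. A minimal sketch of that pattern, shown as a fragment in the style of the hunks above; db (a Hive client), conf (a HiveConf), tbl (a transactional ql.metadata.Table) and the column name "key" are assumptions made for illustration, not names introduced by the patch:

// Sketch only: mirrors the snapshot propagation added in Hive.java above.
AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
long txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
String writeIdList =
    tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
// Metastore stats calls then carry the snapshot alongside the usual arguments.
List<ColumnStatisticsObj> stats = db.getMSC().getTableColumnStatistics(
    tbl.getDbName(), tbl.getTableName(), Lists.newArrayList("key"),
    txnId, writeIdList);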
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 857f300..4d69f4c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
+import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -282,7 +283,17 @@ public class StatsOptimizer extends Transform {
// limit. In order to be safe, we do not use it now.
return null;
}
+
+ Hive hive = Hive.get(pctx.getConf());
Table tbl = tsOp.getConf().getTableMetadata();
+ boolean isTransactionalTable = AcidUtils.isTransactionalTable(tbl);
+
+ // If the table is transactional, get stats state by calling getTable() with
+ // transactional flag on to check the validity of table stats.
+ if (isTransactionalTable) {
+ tbl = hive.getTable(tbl.getDbName(), tbl.getTableName(), true, true);
+ }
+
if (MetaStoreUtils.isExternalTable(tbl.getTTable())) {
Logger.info("Table " + tbl.getTableName() + " is external. Skip StatsOptimizer.");
return null;
@@ -291,11 +302,7 @@ public class StatsOptimizer extends Transform {
Logger.info("Table " + tbl.getTableName() + " is non Native table. Skip StatsOptimizer.");
return null;
}
- if (AcidUtils.isTransactionalTable(tbl)) {
- //todo: should this be OK for MM table?
- Logger.info("Table " + tbl.getTableName() + " is ACID table. Skip StatsOptimizer.");
- return null;
- }
+
Long rowCnt = getRowCnt(pctx, tsOp, tbl);
// if we can not have correct table stats, then both the table stats and column stats are not useful.
if (rowCnt == null) {
@@ -375,7 +382,8 @@ public class StatsOptimizer extends Transform {
List<Object> oneRow = new ArrayList<Object>();
- Hive hive = Hive.get(pctx.getConf());
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(pctx.getConf(), tbl);
for (AggregationDesc aggr : pgbyOp.getConf().getAggregators()) {
if (aggr.getDistinct()) {
@@ -462,8 +470,13 @@ public class StatsOptimizer extends Transform {
+ " are not up to date.");
return null;
}
- List<ColumnStatisticsObj> stats = hive.getMSC().getTableColumnStatistics(
- tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName));
+
+ List<ColumnStatisticsObj> stats =
+ hive.getMSC().getTableColumnStatistics(
+ tbl.getDbName(), tbl.getTableName(),
+ Lists.newArrayList(colName),
+ tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
if (stats.isEmpty()) {
Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
return null;
@@ -523,8 +536,13 @@ public class StatsOptimizer extends Transform {
+ " are not up to date.");
return null;
}
- List<ColumnStatisticsObj> stats = hive.getMSC().getTableColumnStatistics(
- tbl.getDbName(),tbl.getTableName(), Lists.newArrayList(colName));
+
+ List<ColumnStatisticsObj> stats =
+ hive.getMSC().getTableColumnStatistics(
+ tbl.getDbName(), tbl.getTableName(),
+ Lists.newArrayList(colName),
+ tableSnapshot.getTxnId(),
+ tableSnapshot.getValidWriteIdList());
if (stats.isEmpty()) {
Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
return null;
@@ -664,9 +682,12 @@ public class StatsOptimizer extends Transform {
+ " are not up to date.");
return null;
}
- ColumnStatisticsData statData = hive.getMSC().getTableColumnStatistics(
- tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName))
- .get(0).getStatsData();
+ ColumnStatisticsData statData =
+ hive.getMSC().getTableColumnStatistics(
+ tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName),
+ tableSnapshot.getTxnId(),
+ tableSnapshot.getValidWriteIdList())
+ .get(0).getStatsData();
String name = colDesc.getTypeString().toUpperCase();
switch (type) {
case Integer: {
@@ -887,7 +908,7 @@ public class StatsOptimizer extends Transform {
}
private Collection<List<ColumnStatisticsObj>> verifyAndGetPartColumnStats(
- Hive hive, Table tbl, String colName, Set<Partition> parts) throws TException {
+ Hive hive, Table tbl, String colName, Set<Partition> parts) throws TException, LockException {
List<String> partNames = new ArrayList<String>(parts.size());
for (Partition part : parts) {
if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(part.getTable(), part.getParameters(), colName)) {
@@ -897,8 +918,13 @@ public class StatsOptimizer extends Transform {
}
partNames.add(part.getName());
}
+ AcidUtils.TableSnapshot tableSnapshot =
+ AcidUtils.getTableSnapshot(hive.getConf(), tbl);
+
Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
- tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName));
+ tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
+ tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
+ tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
if (result.size() != parts.size()) {
Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions");
return null;
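StatsOptimizer consumes the same snapshot from the reader side: before answering a query purely from metadata, it re-reads the table with checkTransactional set, so stats that are not valid under the current transaction's snapshot disable the rewrite instead of producing stale answers. A hedged sketch of that call, with hive, dbName and tableName assumed to be in scope (illustrative names only):

// Sketch of the transactional re-read StatsOptimizer now performs.
Table tbl = hive.getTable(dbName, tableName,
    true /* throwException */, true /* checkTransactional */);
// The returned metadata reflects whether the stored stats are compliant with the
// current snapshot; if they are not, the optimizer falls through and leaves the
// original scan in place.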
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
index d4d46a3..9a271a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
@@ -344,12 +344,12 @@ public class BasicStatsNoJobTask implements IStatsProcessor {
}
if (values.get(0).result instanceof Table) {
- db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext);
+ db.alterTable(tableFullName, (Table) values.get(0).result, environmentContext, true);
LOG.debug("Updated stats for {}.", tableFullName);
} else {
if (values.get(0).result instanceof Partition) {
List<Partition> results = Lists.transform(values, FooterStatCollector.EXTRACT_RESULT_FUNCTION);
- db.alterPartitions(tableFullName, results, environmentContext);
+ db.alterPartitions(tableFullName, results, environmentContext, true);
LOG.debug("Bulk updated {} partitions of {}.", results.size(), tableFullName);
} else {
throw new RuntimeException("inconsistent");
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
index 8c23887..0a2992d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java
@@ -127,10 +127,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
public Object process(StatsAggregator statsAggregator) throws HiveException, MetaException {
Partish p = partish;
Map<String, String> parameters = p.getPartParameters();
- if (p.isTransactionalTable()) {
- // TODO: this should also happen on any error. Right now this task will just fail.
- StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.FALSE);
- } else if (work.isTargetRewritten()) {
+ if (work.isTargetRewritten()) {
StatsSetupConst.setBasicStatsState(parameters, StatsSetupConst.TRUE);
}
@@ -208,12 +205,6 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
private void updateStats(StatsAggregator statsAggregator, Map<String, String> parameters,
String aggKey, boolean isFullAcid) throws HiveException {
for (String statType : StatsSetupConst.statsRequireCompute) {
- if (isFullAcid && !work.isTargetRewritten()) {
- // Don't bother with aggregation in this case, it will probably be invalid.
- parameters.remove(statType);
- continue;
- }
-
String value = statsAggregator.aggregateStats(aggKey, statType);
if (value != null && !value.isEmpty()) {
long longValue = Long.parseLong(value);
@@ -272,7 +263,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
if (res == null) {
return 0;
}
- db.alterTable(tableFullName, res, environmentContext);
+ db.alterTable(tableFullName, res, environmentContext, true);
if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) {
console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']');
@@ -340,7 +331,7 @@ public class BasicStatsTask implements Serializable, IStatsProcessor {
}
if (!updates.isEmpty()) {
- db.alterPartitions(tableFullName, updates, environmentContext);
+ db.alterPartitions(tableFullName, updates, environmentContext, true);
}
if (work.isStatsReliable() && updates.size() != processors.size()) {
LOG.info("Stats should be reliadble...however seems like there were some issue.. => ret 1");
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
index d4cfd0a..acebf52 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -34,12 +34,14 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.FetchOperator;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.ColumnStatsDesc;
import org.apache.hadoop.hive.ql.plan.FetchWork;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
@@ -176,6 +178,11 @@ public class ColStatsProcessor implements IStatsProcessor {
}
SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
request.setNeedMerge(colStatDesc.isNeedMerge());
+ if (AcidUtils.isTransactionalTable(tbl) && SessionState.get().getTxnMgr() != null) {
+ request.setTxnId(SessionState.get().getTxnMgr().getCurrentTxnId());
+ request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf,
+ AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString());
+ }
db.setPartitionColumnStatistics(request);
return 0;
}
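On the write side, ColStatsProcessor attaches the same information to the request object itself before handing it to Hive.setPartitionColumnStatistics. A minimal sketch under the assumptions of the hunk above; colStats, conf, tbl and db are illustrative names, not additions of the patch:

// Sketch: decorate the column-stats request with the writer's snapshot.
SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
request.setNeedMerge(false); // the patch uses colStatDesc.isNeedMerge() here
if (AcidUtils.isTransactionalTable(tbl) && SessionState.get().getTxnMgr() != null) {
  request.setTxnId(SessionState.get().getTxnMgr().getCurrentTxnId());
  request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf,
      AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString());
}
// Hive.setPartitionColumnStatistics (see the Hive.java hunk earlier) resolves the
// table snapshot again before forwarding the request to the metastore.
db.setPartitionColumnStatistics(request);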
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/queries/clientpositive/stats_nonpart.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_nonpart.q b/ql/src/test/queries/clientpositive/stats_nonpart.q
new file mode 100644
index 0000000..f6019cc
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_nonpart.q
@@ -0,0 +1,53 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int,key int);
+insert into mysource values (100,20), (101,40), (102,50);
+insert into mysource values (100,30), (101,50), (102,60);
+
+-- test nonpartitioned table
+drop table if exists stats_nonpartitioned;
+
+--create table stats_nonpartitioned(key int, value int) stored as orc;
+create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true");
+--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+desc formatted stats_nonpartitioned;
+
+explain insert into table stats_nonpartitioned select * from mysource where p == 100;
+insert into table stats_nonpartitioned select * from mysource where p == 100;
+
+desc formatted stats_nonpartitioned;
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+explain select count(key) from stats_nonpartitioned;
+select count(key) from stats_nonpartitioned;
+
+--analyze table stats_nonpartitioned compute statistics;
+analyze table stats_nonpartitioned compute statistics for columns key, value;
+
+explain select count(*) from stats_nonpartitioned;
+select count(*) from stats_nonpartitioned;
+explain select count(key) from stats_nonpartitioned;
+select count(key) from stats_nonpartitioned;
+
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/queries/clientpositive/stats_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_part.q b/ql/src/test/queries/clientpositive/stats_part.q
new file mode 100644
index 0000000..d0812e1
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_part.q
@@ -0,0 +1,98 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int, key int, value int);
+insert into mysource values (100,20,201), (101,40,401), (102,50,501);
+insert into mysource values (100,21,211), (101,41,411), (102,51,511);
+
+--explain select count(*) from mysource;
+--select count(*) from mysource;
+
+-- Gather col stats manually
+--analyze table mysource compute statistics for columns p, key;
+
+--explain select count(*) from mysource;
+--select count(*) from mysource;
+--explain select count(key) from mysource;
+--select count(key) from mysource;
+
+-- test partitioned table
+drop table if exists stats_partitioned;
+
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc;
+create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true");
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+explain select count(key) from stats_part;
+--select count(*) from stats_part;
+--explain select count(*) from stats_part where p = 100;
+--select count(*) from stats_part where p = 100;
+explain select count(key) from stats_part where p > 100;
+--select count(*) from stats_part where p > 100;
+desc formatted stats_part;
+
+--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101;
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+
+insert into table mysource values (103,20,200), (103,83,832), (103,53,530);
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+show partitions stats_part;
+
+explain select count(*) from stats_part;
+select count(*) from stats_part;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(key) from stats_part where p > 100;
+select count(key) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+--update stats_part set key = key + 100 where key in(-50,40) and p > 100;
+desc formatted stats_part;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+select count(value) from stats_part;
+--update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100;
+select count(value) from stats_part;
+
+--delete from stats_part where key in (20, 41);
+desc formatted stats_part;
+
+explain select count(*) from stats_part where p = 100;
+select count(*) from stats_part where p = 100;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+describe extended stats_part partition (p=101);
+describe extended stats_part;
+
+
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/queries/clientpositive/stats_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_part2.q b/ql/src/test/queries/clientpositive/stats_part2.q
new file mode 100644
index 0000000..24be218
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_part2.q
@@ -0,0 +1,100 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int, key int, value string);
+insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50');
+insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51');
+
+-- test partitioned table
+drop table if exists stats_partitioned;
+
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc;
+create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true");
+--create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+
+--explain select count(*) from stats_part;
+--select count(*) from stats_part;
+--explain select count(*) from stats_part where p = 100;
+--select count(*) from stats_part where p = 100;
+explain select count(*) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+--select count(*) from stats_part where p > 100;
+desc formatted stats_part;
+
+--explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
+insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101;
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+explain select count(key) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+
+insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53');
+insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;
+
+desc formatted stats_part;
+show partitions stats_part;
+
+explain select count(*) from stats_part;
+select count(*) from stats_part;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(key) from stats_part where p > 100;
+select count(key) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+update stats_part set key = key + 100 where key in(-50,40) and p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+
+select count(value) from stats_part;
+update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100;
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+select count(value) from stats_part;
+
+delete from stats_part where key in (20, 41);
+desc formatted stats_part partition(p = 100);
+desc formatted stats_part partition(p = 101);
+desc formatted stats_part partition(p = 102);
+
+explain select count(*) from stats_part where p = 100;
+select count(*) from stats_part where p = 100;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select count(key) from stats_part;
+select count(key) from stats_part;
+explain select count(*) from stats_part where p > 100;
+select count(*) from stats_part where p > 100;
+explain select max(key) from stats_part where p > 100;
+select max(key) from stats_part where p > 100;
+
+describe extended stats_part partition (p=101);
+describe extended stats_part;
+
+
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/queries/clientpositive/stats_sizebug.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_sizebug.q b/ql/src/test/queries/clientpositive/stats_sizebug.q
new file mode 100644
index 0000000..7108766
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_sizebug.q
@@ -0,0 +1,37 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- create source.
+drop table if exists mysource;
+create table mysource (p int,key int);
+insert into mysource values (100,20), (101,40), (102,50);
+insert into mysource values (100,20), (101,40), (102,50);
+
+-- test nonpartitioned table
+drop table if exists stats_nonpartitioned;
+
+--create table stats_nonpartitioned(key int, value int) stored as orc;
+create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true");
+--create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+explain insert into table stats_nonpartitioned select * from mysource where p == 100;
+insert into table stats_nonpartitioned select * from mysource where p == 100;
+
+desc formatted stats_nonpartitioned;
+analyze table mysource compute statistics for columns p, key;
+desc formatted stats_nonpartitioned;
+
+
[36/67] [abbrv] hive git commit: HIVE-19366: Vectorization causing
TestStreaming.testStreamBucketingMatchesRegularBucketing to fail (Prasanth
Jayachandran reviewed by Eugene Koifman)
Posted by se...@apache.org.
HIVE-19366: Vectorization causing TestStreaming.testStreamBucketingMatchesRegularBucketing to fail (Prasanth Jayachandran reviewed by Eugene Koifman)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3eaca1f4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3eaca1f4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3eaca1f4
Branch: refs/heads/master-txnstats
Commit: 3eaca1f4b29fcace7379675f530dfdc7434b862d
Parents: b100483
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Sat Jun 16 20:54:31 2018 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Sat Jun 16 20:54:31 2018 -0700
----------------------------------------------------------------------
.../src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java | 2 --
1 file changed, 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/3eaca1f4/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index 13aa5e9..5e5bc83 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -439,8 +439,6 @@ public class TestStreaming {
String tableLoc = "'" + dbUri + Path.SEPARATOR + "streamedtable" + "'";
String tableLoc2 = "'" + dbUri + Path.SEPARATOR + "finaltable" + "'";
String tableLoc3 = "'" + dbUri + Path.SEPARATOR + "nobucket" + "'";
- // disabling vectorization as this test yields incorrect results with vectorization
- conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
try (IDriver driver = DriverFactory.newDriver(conf)) {
runDDL(driver, "create database testBucketing3");
runDDL(driver, "use testBucketing3");
[38/67] [abbrv] hive git commit: HIVE-19904 : Load data rewrite into
Tez job fails for ACID (Deepak Jaiswal, reviewed by Eugene Koifman)
Posted by se...@apache.org.
HIVE-19904 : Load data rewrite into Tez job fails for ACID (Deepak Jaiswal, reviewed by Eugene Koifman)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/24da4603
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/24da4603
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/24da4603
Branch: refs/heads/master-txnstats
Commit: 24da46034fb47aa6ece963b2d08ebc3c1362961e
Parents: d60bc73
Author: Deepak Jaiswal <dj...@apache.org>
Authored: Sun Jun 17 10:49:48 2018 -0700
Committer: Deepak Jaiswal <dj...@apache.org>
Committed: Sun Jun 17 10:49:48 2018 -0700
----------------------------------------------------------------------
.../hive/ql/parse/LoadSemanticAnalyzer.java | 2 +-
.../apache/hadoop/hive/ql/TestTxnLoadData.java | 6 +-
.../llap/load_data_using_job.q.out | 108 +++++++++----------
3 files changed, 58 insertions(+), 58 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/24da4603/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 189975e..cbacd05 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -75,7 +75,7 @@ public class LoadSemanticAnalyzer extends SemanticAnalyzer {
private static final Logger LOG = LoggerFactory.getLogger(LoadSemanticAnalyzer.class);
private boolean queryReWritten = false;
- private final String tempTblNameSuffix = "__TEMP_TABLE_FOR_LOAD_DATA__";
+ private final String tempTblNameSuffix = "__temp_table_for_load_data__";
// AST specific data
private Tree fromTree, tableTree;
http://git-wip-us.apache.org/repos/asf/hive/blob/24da4603/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
index fb88f25..45f9e52 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java
@@ -378,10 +378,10 @@ public class TestTxnLoadData extends TxnCommandsBaseForTests {
runStatementOnDriver("create table Tstage (a int, b int) stored as orc tblproperties('transactional'='false')");
//this creates an ORC data file with correct schema under table root
runStatementOnDriver("insert into Tstage values(1,2),(3,4)");
- CommandProcessorResponse cpr = runStatementOnDriverNegative("load data local inpath '" + getWarehouseDir() + "' into table T");
- // This condition should not occur with the new support of rewriting load into IAS.
- Assert.assertFalse(cpr.getErrorMessage().contains("Load into bucketed tables are disabled"));
+ // This will work with the new support for rewriting load into IAS.
+ runStatementOnDriver("load data local inpath '" + getWarehouseDir() + "/Tstage' into table T");
}
+
private void checkExpected(List<String> rs, String[][] expected, String msg) {
super.checkExpected(rs, expected, msg, LOG, true);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/24da4603/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out b/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
index 7a62be2..21fd933 100644
--- a/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
+++ b/ql/src/test/results/clientpositive/llap/load_data_using_job.q.out
@@ -240,14 +240,14 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -449,17 +449,17 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=0
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=1
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -663,17 +663,17 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=0
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=1
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -1008,14 +1008,14 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8
-POSTHOOK: Lineage: srcbucket_mapjoin_n8.key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8.value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8.key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8.value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -1230,14 +1230,14 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -1455,17 +1455,17 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=0
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=1
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -1685,17 +1685,17 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=0
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=1
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -2033,17 +2033,17 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=0
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08/hr=1
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=0).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08,hr=1).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -2616,19 +2616,19 @@ STAGE PLANS:
PREHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO TABLE srcbucket_mapjoin_n8
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@hr=0
POSTHOOK: Output: default@srcbucket_mapjoin_n8@hr=1
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=0).ds SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:ds, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=0).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=0).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=1).ds SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:ds, type:string, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=1).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=1).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=0).ds SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=0).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=0).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=1).ds SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=1).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(hr=1).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
@@ -2837,16 +2837,16 @@ PREHOOK: query: load data local inpath '../../data/files/load_data_job/load_data
INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
PREHOOK: type: QUERY
-PREHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+PREHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
PREHOOK: Output: default@srcbucket_mapjoin_n8
POSTHOOK: query: load data local inpath '../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE srcbucket_mapjoin_n8
INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__
+POSTHOOK: Input: default@srcbucket_mapjoin_n8__temp_table_for_load_data__
POSTHOOK: Output: default@srcbucket_mapjoin_n8@ds=2008-04-08
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n8__TEMP_TABLE_FOR_LOAD_DATA__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: srcbucket_mapjoin_n8 PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_n8__temp_table_for_load_data__)srcbucket_mapjoin_n8__temp_table_for_load_data__.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: select * from srcbucket_mapjoin_n8
PREHOOK: type: QUERY
PREHOOK: Input: default@srcbucket_mapjoin_n8
[04/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_round.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
index cdf0ba4..d690579 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round.q.out
@@ -473,15 +473,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
outputColumnNames: _col0, _col1
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [0, 2]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) -> 2:decimal(11,0)
+ projectedOutputColumnNums: [0, 3]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces -1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 2:decimal(10,0)) -> 3:decimal(11,0)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: decimal(10,0))
@@ -497,8 +497,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -506,9 +506,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,0)]
+ scratchColumnTypeNames: [decimal(10,0), decimal(11,0)]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -564,15 +564,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
outputColumnNames: _col0, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [0, 2]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) -> 2:decimal(11,0)
+ projectedOutputColumnNums: [0, 3]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces -1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 2:decimal(10,0)) -> 3:decimal(11,0)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col2 (type: decimal(11,0))
@@ -588,8 +588,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -597,9 +597,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,0)]
+ scratchColumnTypeNames: [decimal(10,0), decimal(11,0)]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
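
Note on the vector_decimal_round.q.out hunks above: once the ORC reader hands the dec column over in DECIMAL_64 form, round(dec, -1) is no longer applied to it directly; the plan first runs ConvertDecimal64ToDecimal into a new decimal(10,0) scratch column and only then applies FuncRoundWithNumDigitsDecimalToDecimal into the decimal(11,0) result column, which is why an extra scratch column appears. The following is a minimal Java sketch of that idea only; it uses plain java.math.BigDecimal as a stand-in for Hive's column vectors, so the class and method names here are illustrative assumptions, not Hive's actual API.

import java.math.BigDecimal;
import java.math.RoundingMode;

public class Decimal64RoundSketch {
    // Conceptual ConvertDecimal64ToDecimal: a DECIMAL_64 value is just an
    // unscaled long plus a fixed scale; materialize it as a full decimal.
    static BigDecimal fromDecimal64(long unscaled, int scale) {
        return BigDecimal.valueOf(unscaled, scale);
    }

    public static void main(String[] args) {
        long unscaled = 57L;                      // dec = 57 held as decimal(10,0)/DECIMAL_64
        BigDecimal dec = fromDecimal64(unscaled, 0);
        // Round to the nearest multiple of 10, analogous to decimalPlaces -1
        // in the plan; the rounding mode here is an assumption for illustration.
        BigDecimal rounded = dec.setScale(-1, RoundingMode.HALF_UP).setScale(0);
        System.out.println(dec + " -> " + rounded);   // prints: 57 -> 60
    }
}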
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
index 4140393..446fa37 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
@@ -81,8 +81,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -223,8 +223,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -392,8 +392,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -550,8 +550,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
index 284e71a..f25b0b2 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out
@@ -88,7 +88,7 @@ STAGE PLANS:
Statistics: Num rows: 30 Data size: 4712 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:id:int, 1:a:decimal(10,4), 2:b:decimal(15,8), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:id:int, 1:a:decimal(10,4)/DECIMAL_64, 2:b:decimal(15,8)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: id (type: int), a (type: decimal(10,4)), b (type: decimal(15,8))
outputColumnNames: _col0, _col1, _col2
@@ -111,8 +111,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -120,7 +120,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0, 1, 2]
- dataColumns: id:int, a:decimal(10,4), b:decimal(15,8)
+ dataColumns: id:int, a:decimal(10,4)/DECIMAL_64, b:decimal(15,8)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Vectorization:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out b/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
index c1eddca..089ef99 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
@@ -83,12 +83,12 @@ STAGE PLANS:
Statistics: Num rows: 39 Data size: 4072 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(14,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+ predicateExpression: FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 1000000)
predicate: (key = 10) (type: boolean)
Statistics: Num rows: 19 Data size: 1983 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -114,8 +114,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -123,7 +123,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(14,5), value:int
+ dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [double, double, double, double, double, double, double]
@@ -188,12 +188,12 @@ STAGE PLANS:
Statistics: Num rows: 39 Data size: 4072 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(14,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+ predicateExpression: FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 1000000)
predicate: (key = 10) (type: boolean)
Statistics: Num rows: 19 Data size: 1983 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -219,8 +219,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -228,7 +228,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(14,5), value:int
+ dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [double, double, double, double, double, double, double, double]
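
Note on the vector_decimal_udf2.q.out hunks above: the predicate key = 10 on a decimal(14,5) column is now planned as FilterDecimal64ColEqualDecimal64Scalar with val 1000000, i.e. the literal rescaled to the column's scale (10 * 10^5), so each row comparison can run on unscaled longs instead of full decimals. Below is a small, self-contained Java sketch of that rescaling; it uses plain BigDecimal rather than Hive's vectorized expression classes, so the names are illustrative assumptions only.

import java.math.BigDecimal;
import java.math.RoundingMode;

public class Decimal64FilterSketch {
    // Rescale a literal to the column's scale and keep the unscaled long,
    // mirroring how the scalar 10 becomes 1000000 for decimal(14,5).
    static long toDecimal64(BigDecimal value, int scale) {
        return value.setScale(scale, RoundingMode.UNNECESSARY)
                    .unscaledValue().longValueExact();
    }

    public static void main(String[] args) {
        int columnScale = 5;                                  // decimal(14,5)
        long scalar = toDecimal64(new BigDecimal("10"), columnScale);
        System.out.println(scalar);                           // prints: 1000000

        long[] keyColumn = { 1_000_000L, 1_234_500L };        // 10.00000 and 12.34500
        for (long unscaled : keyColumn) {
            // The per-row filter is now a plain long equality check.
            System.out.println(unscaled + (unscaled == scalar ? " matches key = 10" : " does not match"));
        }
    }
}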
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_delete_orig_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_delete_orig_table.q.out b/ql/src/test/results/clientpositive/vector_delete_orig_table.q.out
index 4ce897e..5d7f310 100644
--- a/ql/src/test/results/clientpositive/vector_delete_orig_table.q.out
+++ b/ql/src/test/results/clientpositive/vector_delete_orig_table.q.out
@@ -87,8 +87,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
index 41c61ce..8eefb3d 100644
--- a/ql/src/test/results/clientpositive/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
@@ -158,8 +158,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_elt.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_elt.q.out b/ql/src/test/results/clientpositive/vector_elt.q.out
index 1b3e856..5806ca7 100644
--- a/ql/src/test/results/clientpositive/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/vector_elt.q.out
@@ -59,8 +59,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -166,8 +166,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_empty_where.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_empty_where.q.out b/ql/src/test/results/clientpositive/vector_empty_where.q.out
index a77f55f..388b775 100644
--- a/ql/src/test/results/clientpositive/vector_empty_where.q.out
+++ b/ql/src/test/results/clientpositive/vector_empty_where.q.out
@@ -63,8 +63,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -206,8 +206,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -357,8 +357,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -508,8 +508,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby4.q.out b/ql/src/test/results/clientpositive/vector_groupby4.q.out
index 4822871..15b0427 100644
--- a/ql/src/test/results/clientpositive/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby4.q.out
@@ -70,8 +70,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby6.q.out b/ql/src/test/results/clientpositive/vector_groupby6.q.out
index 3353fdd..31472a1 100644
--- a/ql/src/test/results/clientpositive/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby6.q.out
@@ -70,8 +70,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
index dbdbf46..173f84f 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
@@ -161,8 +161,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
index 56f0de2..c18ab63 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
@@ -287,8 +287,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -478,8 +478,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -755,8 +755,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -953,8 +953,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
index cf0ec94..5113966 100644
--- a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
@@ -184,8 +184,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -295,8 +295,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_if_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_if_expr.q.out b/ql/src/test/results/clientpositive/vector_if_expr.q.out
index c948ea8..20c47ed 100644
--- a/ql/src/test/results/clientpositive/vector_if_expr.q.out
+++ b/ql/src/test/results/clientpositive/vector_if_expr.q.out
@@ -51,8 +51,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_include_no_sel.q.out b/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
index dae2d63..0ecc7af 100644
--- a/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
+++ b/ql/src/test/results/clientpositive/vector_include_no_sel.q.out
@@ -259,8 +259,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_interval_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_1.q.out b/ql/src/test/results/clientpositive/vector_interval_1.q.out
index 70b7c66..6ee3154 100644
--- a/ql/src/test/results/clientpositive/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_1.q.out
@@ -106,8 +106,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -217,8 +217,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -336,8 +336,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -467,8 +467,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -610,8 +610,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -735,8 +735,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -842,8 +842,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -955,8 +955,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
index 1547942..2a390fa 100644
--- a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
@@ -100,8 +100,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -265,8 +265,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -430,8 +430,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -599,8 +599,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -698,8 +698,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -865,8 +865,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1032,8 +1032,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1199,8 +1199,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
index 1654bd9..a3f4b29 100644
--- a/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
@@ -274,8 +274,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_left_outer_join.q.out b/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
index f3ddcd0..9aa6531 100644
--- a/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
+++ b/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
@@ -99,8 +99,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
index 3b9bbf4..7f2f17f 100644
--- a/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
@@ -353,8 +353,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -476,8 +476,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -599,8 +599,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -722,8 +722,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_multi_insert.q.out b/ql/src/test/results/clientpositive/vector_multi_insert.q.out
index 9fc92dd..65e1034 100644
--- a/ql/src/test/results/clientpositive/vector_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/vector_multi_insert.q.out
@@ -159,8 +159,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
index 3e4f7b9..218b562 100644
--- a/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
+++ b/ql/src/test/results/clientpositive/vector_non_string_partition.q.out
@@ -78,8 +78,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -180,8 +180,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_null_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_null_projection.q.out b/ql/src/test/results/clientpositive/vector_null_projection.q.out
index 86f96f9..4c88b00 100644
--- a/ql/src/test/results/clientpositive/vector_null_projection.q.out
+++ b/ql/src/test/results/clientpositive/vector_null_projection.q.out
@@ -79,8 +79,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_nvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_nvl.q.out b/ql/src/test/results/clientpositive/vector_nvl.q.out
index c6eb842..7a6623e 100644
--- a/ql/src/test/results/clientpositive/vector_nvl.q.out
+++ b/ql/src/test/results/clientpositive/vector_nvl.q.out
@@ -61,8 +61,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -152,8 +152,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -241,8 +241,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -330,8 +330,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
index 324bdd0..734c6a9 100644
--- a/ql/src/test/results/clientpositive/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
@@ -162,8 +162,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/vector_outer_join0.q.out
index 955c39a..9bb2fad 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join0.q.out
@@ -144,8 +144,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -263,8 +263,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
index 5096b51..ece32f6 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
@@ -304,8 +304,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -446,8 +446,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -721,8 +721,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
index ef6ef3a..455d09e 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
@@ -361,8 +361,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
[13/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
index 8ad2017..c66f3d4 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_date_funcs.q.out
@@ -288,8 +288,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -580,8 +580,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -876,8 +876,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1150,8 +1150,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1280,8 +1280,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
index de3c6e6..7e78360 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_distinct_gby.q.out
@@ -93,8 +93,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -226,8 +226,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index d3ab509..8ee96d3 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -82,8 +82,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -261,8 +260,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -305,8 +303,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -419,8 +417,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -448,8 +445,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -578,8 +575,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -622,8 +618,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -666,8 +662,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -804,8 +800,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -833,8 +828,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -862,8 +857,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1006,8 +1001,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1065,8 +1059,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1179,8 +1173,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1208,8 +1201,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1333,8 +1326,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1377,8 +1369,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1491,8 +1483,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1520,8 +1511,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1643,8 +1634,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1687,8 +1677,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1801,8 +1791,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1845,8 +1834,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1959,8 +1948,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1988,8 +1976,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2102,8 +2090,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2131,8 +2118,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2258,8 +2245,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2302,8 +2288,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2437,8 +2423,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2577,8 +2562,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2605,8 +2589,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2723,8 +2707,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2782,8 +2765,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2896,8 +2879,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2940,8 +2922,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3052,8 +3034,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3078,8 +3060,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3172,8 +3153,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3216,8 +3196,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3314,8 +3294,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3358,8 +3337,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3402,8 +3381,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3537,8 +3516,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3594,8 +3573,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3722,8 +3701,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3752,8 +3730,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3782,8 +3759,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3982,8 +3958,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4012,8 +3987,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4042,8 +4016,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4247,8 +4220,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4277,8 +4249,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4305,8 +4276,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4335,8 +4305,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4597,8 +4566,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4641,8 +4609,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4773,8 +4741,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4817,8 +4784,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4861,8 +4828,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4983,8 +4950,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5042,8 +5008,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5161,8 +5127,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5205,8 +5170,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5313,8 +5278,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5357,8 +5321,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5465,8 +5429,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5509,8 +5472,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5625,8 +5588,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5779,8 +5741,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5823,8 +5784,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5914,8 +5875,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5940,8 +5901,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6015,8 +5975,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6057,8 +6016,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6157,8 +6116,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6201,8 +6159,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6245,8 +6203,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6355,8 +6313,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6425,8 +6383,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6533,8 +6491,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6563,8 +6520,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6593,8 +6549,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
index 76d368c..2aa9af2 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction.q.out
@@ -84,8 +84,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -155,8 +155,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -318,8 +318,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -389,8 +389,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -552,8 +552,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -623,8 +623,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -787,8 +787,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -858,8 +858,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -929,8 +929,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1124,8 +1124,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1223,8 +1223,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1415,8 +1415,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1486,8 +1486,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out
index 37eebe4..c03b298 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_semijoin_reduction2.q.out
@@ -226,10 +226,14 @@ POSTHOOK: Input: default@dsrv2_big
POSTHOOK: Input: default@dsrv2_small
#### A masked pattern was here ####
20
-PREHOOK: query: EXPLAIN select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_decimal = b.partkey_decimal)
+PREHOOK: query: EXPLAIN VECTORIZATION DETAIL select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_decimal = b.partkey_decimal)
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_decimal = b.partkey_decimal)
+POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_decimal = b.partkey_decimal)
POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+ enabled: true
+ enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
@@ -251,53 +255,134 @@ STAGE PLANS:
alias: a
filterExpr: (partkey_decimal is not null and (partkey_decimal BETWEEN DynamicValue(RS_7_b_partkey_decimal_min) AND DynamicValue(RS_7_b_partkey_decimal_max) and in_bloom_filter(partkey_decimal, DynamicValue(RS_7_b_partkey_decimal_bloom_filter)))) (type: boolean)
Statistics: Num rows: 100 Data size: 11200 Basic stats: COMPLETE Column stats: COMPLETE
+ TableScan Vectorization:
+ native: true
+ vectorizationSchemaColumns: [0:partkey_bigint:bigint, 1:partkey_decimal:decimal(10,1)/DECIMAL_64, 2:partkey_double:double, 3:shipdate_date:date, 4:shipdate_ts:timestamp, 5:shipdate_string:string, 6:shipdate_char:char(10), 7:shipdate_varchar:varchar(10), 8:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
+ Filter Vectorization:
+ className: VectorFilterOperator
+ native: true
+ predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 9:decimal(10,1))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,1)/DECIMAL_64) -> 9:decimal(10,1)), FilterExprAndExpr(children: FilterDecimalColumnBetweenDynamicValue(col 9:decimal(10,1), left 0, right 0)(children: ConvertDecimal64ToDecimal(col 1:decimal(10,1)/DECIMAL_64) -> 9:decimal(10,1)), VectorInBloomFilterColDynamicValue(children: ConvertDecimal64ToDecimal(col 1:decimal(10,1)/DECIMAL_64) -> 9:decimal(10,1))))
predicate: ((partkey_decimal BETWEEN DynamicValue(RS_7_b_partkey_decimal_min) AND DynamicValue(RS_7_b_partkey_decimal_max) and in_bloom_filter(partkey_decimal, DynamicValue(RS_7_b_partkey_decimal_bloom_filter))) and partkey_decimal is not null) (type: boolean)
Statistics: Num rows: 100 Data size: 11200 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: partkey_decimal (type: decimal(10,1))
outputColumnNames: _col0
+ Select Vectorization:
+ className: VectorSelectOperator
+ native: true
+ projectedOutputColumnNums: [1]
Statistics: Num rows: 100 Data size: 11200 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: decimal(10,1))
sort order: +
Map-reduce partition columns: _col0 (type: decimal(10,1))
+ Reduce Sink Vectorization:
+ className: VectorReduceSinkMultiKeyOperator
+ keyColumnNums: [1]
+ native: true
+ nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+ valueColumnNums: []
Statistics: Num rows: 100 Data size: 11200 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
+ Map Vectorization:
+ enabled: true
+ enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
+ inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ allNative: true
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ rowBatchContext:
+ dataColumnCount: 8
+ includeColumns: [1]
+ dataColumns: partkey_bigint:bigint, partkey_decimal:decimal(10,1)/DECIMAL_64, partkey_double:double, shipdate_date:date, shipdate_ts:timestamp, shipdate_string:string, shipdate_char:char(10), shipdate_varchar:varchar(10)
+ partitionColumnCount: 0
+ scratchColumnTypeNames: [decimal(10,1)]
Map 4
Map Operator Tree:
TableScan
alias: b
filterExpr: partkey_decimal is not null (type: boolean)
Statistics: Num rows: 20 Data size: 2240 Basic stats: COMPLETE Column stats: COMPLETE
+ TableScan Vectorization:
+ native: true
+ vectorizationSchemaColumns: [0:partkey_bigint:bigint, 1:partkey_decimal:decimal(10,1)/DECIMAL_64, 2:partkey_double:double, 3:shipdate_date:date, 4:shipdate_ts:timestamp, 5:shipdate_string:string, 6:shipdate_char:char(10), 7:shipdate_varchar:varchar(10), 8:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
+ Filter Vectorization:
+ className: VectorFilterOperator
+ native: true
+ predicateExpression: SelectColumnIsNotNull(col 9:decimal(10,1))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,1)/DECIMAL_64) -> 9:decimal(10,1))
predicate: partkey_decimal is not null (type: boolean)
Statistics: Num rows: 20 Data size: 2240 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: partkey_decimal (type: decimal(10,1))
outputColumnNames: _col0
+ Select Vectorization:
+ className: VectorSelectOperator
+ native: true
+ projectedOutputColumnNums: [1]
Statistics: Num rows: 20 Data size: 2240 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: decimal(10,1))
sort order: +
Map-reduce partition columns: _col0 (type: decimal(10,1))
+ Reduce Sink Vectorization:
+ className: VectorReduceSinkMultiKeyOperator
+ keyColumnNums: [1]
+ native: true
+ nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+ valueColumnNums: []
Statistics: Num rows: 20 Data size: 2240 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: decimal(10,1))
outputColumnNames: _col0
+ Select Vectorization:
+ className: VectorSelectOperator
+ native: true
+ projectedOutputColumnNums: [1]
Statistics: Num rows: 20 Data size: 2240 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=20)
+ Group By Vectorization:
+ aggregators: VectorUDAFMinDecimal64(col 1:decimal(10,1)/DECIMAL_64) -> decimal(10,1)/DECIMAL_64, VectorUDAFMaxDecimal64(col 1:decimal(10,1)/DECIMAL_64) -> decimal(10,1)/DECIMAL_64, VectorUDAFBloomFilter(ConvertDecimal64ToDecimal(col 1:decimal(10,1)/DECIMAL_64) -> 9:decimal(10,1)) -> binary
+ className: VectorGroupByOperator
+ groupByMode: HASH
+ native: false
+ vectorProcessingMode: HASH
+ projectedOutputColumnNums: [0, 1, 2]
mode: hash
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
+ Reduce Sink Vectorization:
+ className: VectorReduceSinkEmptyKeyOperator
+ keyColumnNums: []
+ native: true
+ nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+ valueColumnNums: [0, 1, 2]
Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: decimal(10,1)), _col1 (type: decimal(10,1)), _col2 (type: binary)
Execution mode: vectorized, llap
LLAP IO: all inputs
+ Map Vectorization:
+ enabled: true
+ enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
+ inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ allNative: false
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ rowBatchContext:
+ dataColumnCount: 8
+ includeColumns: [1]
+ dataColumns: partkey_bigint:bigint, partkey_decimal:decimal(10,1)/DECIMAL_64, partkey_double:double, shipdate_date:date, shipdate_ts:timestamp, shipdate_string:string, shipdate_char:char(10), shipdate_varchar:varchar(10)
+ partitionColumnCount: 0
+ scratchColumnTypeNames: [decimal(10,1)]
Reducer 2
Execution mode: llap
Reduce Operator Tree:
@@ -319,14 +404,37 @@ STAGE PLANS:
value expressions: _col0 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
+ Reduce Vectorization:
+ enabled: true
+ enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+ reduceColumnNullOrder:
+ reduceColumnSortOrder:
+ allNative: false
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ rowBatchContext:
+ dataColumnCount: 1
+ dataColumns: VALUE._col0:bigint
+ partitionColumnCount: 0
+ scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
+ Group By Vectorization:
+ aggregators: VectorUDAFCountMerge(col 0:bigint) -> bigint
+ className: VectorGroupByOperator
+ groupByMode: MERGEPARTIAL
+ native: false
+ vectorProcessingMode: GLOBAL
+ projectedOutputColumnNums: [0]
mode: mergepartial
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
+ File Sink Vectorization:
+ className: VectorFileSinkOperator
+ native: false
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -334,14 +442,40 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Reducer 5
Execution mode: vectorized, llap
+ Reduce Vectorization:
+ enabled: true
+ enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+ reduceColumnNullOrder:
+ reduceColumnSortOrder:
+ allNative: false
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ rowBatchContext:
+ dataColumnCount: 3
+ dataColumns: VALUE._col0:decimal(10,1), VALUE._col1:decimal(10,1), VALUE._col2:binary
+ partitionColumnCount: 0
+ scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=20)
+ Group By Vectorization:
+ aggregators: VectorUDAFMinDecimal(col 0:decimal(10,1)) -> decimal(10,1), VectorUDAFMaxDecimal(col 1:decimal(10,1)) -> decimal(10,1), VectorUDAFBloomFilterMerge(col 2:binary) -> binary
+ className: VectorGroupByOperator
+ groupByMode: FINAL
+ native: false
+ vectorProcessingMode: STREAMING
+ projectedOutputColumnNums: [0, 1, 2]
mode: final
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
+ Reduce Sink Vectorization:
+ className: VectorReduceSinkEmptyKeyOperator
+ keyColumnNums: []
+ native: true
+ nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+ valueColumnNums: [0, 1, 2]
Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: decimal(10,1)), _col1 (type: decimal(10,1)), _col2 (type: binary)
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
index b30fbf3..53cb943 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin.q.out
@@ -93,8 +93,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -135,8 +135,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
[65/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/results/clientpositive/stats_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_part2.q.out b/ql/src/test/results/clientpositive/stats_part2.q.out
new file mode 100644
index 0000000..94e186d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_part2.q.out
@@ -0,0 +1,1261 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int, key int, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int, key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'string50')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: drop table if exists stats_partitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_partitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_part
+POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_part
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (p > 100) (type: boolean)
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Filter Operator
+ predicate: (p > 100) (type: boolean)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 0
+ numPartitions 0
+ numRows 0
+ rawDataSize 0
+ totalSize 0
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=100
+POSTHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=100
+POSTHOOK: Lineage: stats_part PARTITION(p=100).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=100).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=101
+POSTHOOK: query: insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=101).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 3
+ numPartitions 3
+ numRows 6
+ rawDataSize 0
+ totalSize 2337
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into table mysource values (103,20,'value20'), (103,83,'value83'), (103,53,'value53')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+POSTHOOK: Lineage: mysource.value SCRIPT []
+PREHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_part@p=102
+POSTHOOK: Lineage: stats_part PARTITION(p=102).key SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=102).value SIMPLE [(mysource)mysource.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 4
+ numPartitions 3
+ numRows 8
+ rawDataSize 0
+ totalSize 3126
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: show partitions stats_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: show partitions stats_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@stats_part
+p=100
+p=101
+p=102
+PREHOOK: query: explain select count(*) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+8
+PREHOOK: query: explain select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+51
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 758
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 789
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+PREHOOK: Output: default@stats_part@p=101
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Output: default@stats_part@p=102
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+140
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 758
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 3
+ numRows 2
+ rawDataSize 0
+ totalSize 2238
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+8
+PREHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+PREHOOK: Output: default@stats_part@p=101
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: update stats_part set value = concat(value, 'updated') where cast(key as integer) in(40,53) and p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Output: default@stats_part@p=102
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 758
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 3
+ numRows 2
+ rawDataSize 0
+ totalSize 2238
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select count(value) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(value) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+8
+PREHOOK: query: delete from stats_part where key in (20, 41)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+PREHOOK: Output: default@stats_part@p=100
+PREHOOK: Output: default@stats_part@p=101
+PREHOOK: Output: default@stats_part@p=102
+POSTHOOK: query: delete from stats_part where key in (20, 41)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+POSTHOOK: Output: default@stats_part@p=100
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Output: default@stats_part@p=102
+PREHOOK: query: desc formatted stats_part partition(p = 100)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 100)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [100]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 2
+ numRows 1
+ rawDataSize 0
+ totalSize 1366
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [101]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ numFiles 4
+ numRows 1
+ rawDataSize 0
+ totalSize 2837
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted stats_part partition(p = 102)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part partition(p = 102)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Partition Information
+Partition Value: [102]
+Database: default
+Table: stats_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ numFiles 2
+ numRows 4
+ rawDataSize 0
+ totalSize 1579
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p = 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p = 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+1
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+5
+PREHOOK: query: explain select count(key) from stats_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(key) from stats_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(key) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=100
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=100
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+6
+PREHOOK: query: explain select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+5
+PREHOOK: query: explain select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: stats_part
+ Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: key
+ Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: max(key)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: int)
+ Execution mode: vectorized
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select max(key) from stats_part where p > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+PREHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+POSTHOOK: query: select max(key) from stats_part where p > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+POSTHOOK: Input: default@stats_part@p=102
+#### A masked pattern was here ####
+140
+PREHOOK: query: describe extended stats_part partition (p=101)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part partition (p=101)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
+PREHOOK: query: describe extended stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: describe extended stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+key int
+value string
+p int
+
+# Partition Information
+# col_name data_type comment
+p int
+
+#### A masked pattern was here ####
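A note on the plans in this file: the aggregate queries that compile to a single Stage-0 Fetch Operator with limit: 1 are being answered from the metastore statistics of the transactional partitions rather than by scanning data, which is why the stats maintained through the insert/update/delete sequence above matter. A minimal sketch of toggling that behavior for comparison, assuming the usual statistics session settings (hive.compute.query.using.stats and hive.stats.autogather are ordinary Hive properties, not part of this patch):

-- Sketch only: compare stats-answered and scan-based plans for the same query.
set hive.stats.autogather=true;
set hive.compute.query.using.stats=true;
explain select count(*) from stats_part;   -- expected: Stage-0 Fetch Operator only
set hive.compute.query.using.stats=false;
explain select count(*) from stats_part;   -- expected: full map-reduce aggregation stages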
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/ql/src/test/results/clientpositive/stats_sizebug.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_sizebug.q.out b/ql/src/test/results/clientpositive/stats_sizebug.q.out
new file mode 100644
index 0000000..b1bbf94
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_sizebug.q.out
@@ -0,0 +1,216 @@
+PREHOOK: query: drop table if exists mysource
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists mysource
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table mysource (p int,key int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mysource
+POSTHOOK: query: create table mysource (p int,key int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mysource
+PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@mysource
+POSTHOOK: query: insert into mysource values (100,20), (101,40), (102,50)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@mysource
+POSTHOOK: Lineage: mysource.key SCRIPT []
+POSTHOOK: Lineage: mysource.p SCRIPT []
+PREHOOK: query: drop table if exists stats_nonpartitioned
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists stats_nonpartitioned
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: create table stats_nonpartitioned(key int, value int) stored as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_nonpartitioned
+PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+ Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: mysource
+ Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (p = 100) (type: boolean)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: 100 (type: int), key (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+ Write Type: INSERT
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int)
+ outputColumnNames: key, value
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: compute_stats(key, 'hll'), compute_stats(value, 'hll')
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 848 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.stats_nonpartitioned
+ Write Type: INSERT
+
+ Stage: Stage-2
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: key, value
+ Column Types: int, int
+ Table: default.stats_nonpartitioned
+
+PREHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: query: insert into table stats_nonpartitioned select * from mysource where p == 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@stats_nonpartitioned
+POSTHOOK: Lineage: stats_nonpartitioned.key SIMPLE []
+POSTHOOK: Lineage: stats_nonpartitioned.value SIMPLE [(mysource)mysource.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 719
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table mysource compute statistics for columns p, key
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@mysource
+PREHOOK: Output: default@mysource
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table mysource compute statistics for columns p, key
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@mysource
+POSTHOOK: Output: default@mysource
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted stats_nonpartitioned
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_nonpartitioned
+POSTHOOK: query: desc formatted stats_nonpartitioned
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_nonpartitioned
+# col_name data_type comment
+key int
+value int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+ bucketing_version 2
+ numFiles 1
+ numRows 2
+ rawDataSize 0
+ totalSize 719
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
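The insert plan above carries a second branch that aggregates compute_stats(..., 'hll') into the Stage-2 Stats Work, which is what keeps COLUMN_STATS_ACCURATE populated in the desc formatted output. A small sketch for inspecting the per-column statistics that branch produces, using ordinary Hive syntax rather than anything added by this patch:

-- Sketch only: shows per-column statistics such as min, max, num_nulls, distinct_count.
desc formatted stats_nonpartitioned key;
desc formatted stats_nonpartitioned value;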
[60/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
new file mode 100644
index 0000000..8d4102f
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@ -0,0 +1,966 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsRequest implements org.apache.thrift.TBase<AlterPartitionsRequest, AlterPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AlterPartitionsRequest> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsRequest");
+
+ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
+ private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)3);
+ private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)4);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new AlterPartitionsRequestStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new AlterPartitionsRequestTupleSchemeFactory());
+ }
+
+ private String dbName; // required
+ private String tableName; // required
+ private List<Partition> partitions; // required
+ private EnvironmentContext environmentContext; // required
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ DB_NAME((short)1, "dbName"),
+ TABLE_NAME((short)2, "tableName"),
+ PARTITIONS((short)3, "partitions"),
+ ENVIRONMENT_CONTEXT((short)4, "environmentContext"),
+ TXN_ID((short)5, "txnId"),
+ VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it is not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // DB_NAME
+ return DB_NAME;
+ case 2: // TABLE_NAME
+ return TABLE_NAME;
+ case 3: // PARTITIONS
+ return PARTITIONS;
+ case 4: // ENVIRONMENT_CONTEXT
+ return ENVIRONMENT_CONTEXT;
+ case 5: // TXN_ID
+ return TXN_ID;
+ case 6: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it is not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+ tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.REQUIRED,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsRequest.class, metaDataMap);
+ }
+
+ public AlterPartitionsRequest() {
+ this.txnId = -1L;
+
+ }
+
+ public AlterPartitionsRequest(
+ String dbName,
+ String tableName,
+ List<Partition> partitions,
+ EnvironmentContext environmentContext)
+ {
+ this();
+ this.dbName = dbName;
+ this.tableName = tableName;
+ this.partitions = partitions;
+ this.environmentContext = environmentContext;
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public AlterPartitionsRequest(AlterPartitionsRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
+ if (other.isSetDbName()) {
+ this.dbName = other.dbName;
+ }
+ if (other.isSetTableName()) {
+ this.tableName = other.tableName;
+ }
+ if (other.isSetPartitions()) {
+ List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
+ for (Partition other_element : other.partitions) {
+ __this__partitions.add(new Partition(other_element));
+ }
+ this.partitions = __this__partitions;
+ }
+ if (other.isSetEnvironmentContext()) {
+ this.environmentContext = new EnvironmentContext(other.environmentContext);
+ }
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ }
+
+ public AlterPartitionsRequest deepCopy() {
+ return new AlterPartitionsRequest(this);
+ }
+
+ @Override
+ public void clear() {
+ this.dbName = null;
+ this.tableName = null;
+ this.partitions = null;
+ this.environmentContext = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ }
+
+ public String getDbName() {
+ return this.dbName;
+ }
+
+ public void setDbName(String dbName) {
+ this.dbName = dbName;
+ }
+
+ public void unsetDbName() {
+ this.dbName = null;
+ }
+
+ /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+ public boolean isSetDbName() {
+ return this.dbName != null;
+ }
+
+ public void setDbNameIsSet(boolean value) {
+ if (!value) {
+ this.dbName = null;
+ }
+ }
+
+ public String getTableName() {
+ return this.tableName;
+ }
+
+ public void setTableName(String tableName) {
+ this.tableName = tableName;
+ }
+
+ public void unsetTableName() {
+ this.tableName = null;
+ }
+
+ /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+ public boolean isSetTableName() {
+ return this.tableName != null;
+ }
+
+ public void setTableNameIsSet(boolean value) {
+ if (!value) {
+ this.tableName = null;
+ }
+ }
+
+ public int getPartitionsSize() {
+ return (this.partitions == null) ? 0 : this.partitions.size();
+ }
+
+ public java.util.Iterator<Partition> getPartitionsIterator() {
+ return (this.partitions == null) ? null : this.partitions.iterator();
+ }
+
+ public void addToPartitions(Partition elem) {
+ if (this.partitions == null) {
+ this.partitions = new ArrayList<Partition>();
+ }
+ this.partitions.add(elem);
+ }
+
+ public List<Partition> getPartitions() {
+ return this.partitions;
+ }
+
+ public void setPartitions(List<Partition> partitions) {
+ this.partitions = partitions;
+ }
+
+ public void unsetPartitions() {
+ this.partitions = null;
+ }
+
+ /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
+ public boolean isSetPartitions() {
+ return this.partitions != null;
+ }
+
+ public void setPartitionsIsSet(boolean value) {
+ if (!value) {
+ this.partitions = null;
+ }
+ }
+
+ public EnvironmentContext getEnvironmentContext() {
+ return this.environmentContext;
+ }
+
+ public void setEnvironmentContext(EnvironmentContext environmentContext) {
+ this.environmentContext = environmentContext;
+ }
+
+ public void unsetEnvironmentContext() {
+ this.environmentContext = null;
+ }
+
+ /** Returns true if field environmentContext is set (has been assigned a value) and false otherwise */
+ public boolean isSetEnvironmentContext() {
+ return this.environmentContext != null;
+ }
+
+ public void setEnvironmentContextIsSet(boolean value) {
+ if (!value) {
+ this.environmentContext = null;
+ }
+ }
+
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case DB_NAME:
+ if (value == null) {
+ unsetDbName();
+ } else {
+ setDbName((String)value);
+ }
+ break;
+
+ case TABLE_NAME:
+ if (value == null) {
+ unsetTableName();
+ } else {
+ setTableName((String)value);
+ }
+ break;
+
+ case PARTITIONS:
+ if (value == null) {
+ unsetPartitions();
+ } else {
+ setPartitions((List<Partition>)value);
+ }
+ break;
+
+ case ENVIRONMENT_CONTEXT:
+ if (value == null) {
+ unsetEnvironmentContext();
+ } else {
+ setEnvironmentContext((EnvironmentContext)value);
+ }
+ break;
+
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case DB_NAME:
+ return getDbName();
+
+ case TABLE_NAME:
+ return getTableName();
+
+ case PARTITIONS:
+ return getPartitions();
+
+ case ENVIRONMENT_CONTEXT:
+ return getEnvironmentContext();
+
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case DB_NAME:
+ return isSetDbName();
+ case TABLE_NAME:
+ return isSetTableName();
+ case PARTITIONS:
+ return isSetPartitions();
+ case ENVIRONMENT_CONTEXT:
+ return isSetEnvironmentContext();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof AlterPartitionsRequest)
+ return this.equals((AlterPartitionsRequest)that);
+ return false;
+ }
+
+ public boolean equals(AlterPartitionsRequest that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_dbName = true && this.isSetDbName();
+ boolean that_present_dbName = true && that.isSetDbName();
+ if (this_present_dbName || that_present_dbName) {
+ if (!(this_present_dbName && that_present_dbName))
+ return false;
+ if (!this.dbName.equals(that.dbName))
+ return false;
+ }
+
+ boolean this_present_tableName = true && this.isSetTableName();
+ boolean that_present_tableName = true && that.isSetTableName();
+ if (this_present_tableName || that_present_tableName) {
+ if (!(this_present_tableName && that_present_tableName))
+ return false;
+ if (!this.tableName.equals(that.tableName))
+ return false;
+ }
+
+ boolean this_present_partitions = true && this.isSetPartitions();
+ boolean that_present_partitions = true && that.isSetPartitions();
+ if (this_present_partitions || that_present_partitions) {
+ if (!(this_present_partitions && that_present_partitions))
+ return false;
+ if (!this.partitions.equals(that.partitions))
+ return false;
+ }
+
+ boolean this_present_environmentContext = true && this.isSetEnvironmentContext();
+ boolean that_present_environmentContext = true && that.isSetEnvironmentContext();
+ if (this_present_environmentContext || that_present_environmentContext) {
+ if (!(this_present_environmentContext && that_present_environmentContext))
+ return false;
+ if (!this.environmentContext.equals(that.environmentContext))
+ return false;
+ }
+
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ boolean present_dbName = true && (isSetDbName());
+ list.add(present_dbName);
+ if (present_dbName)
+ list.add(dbName);
+
+ boolean present_tableName = true && (isSetTableName());
+ list.add(present_tableName);
+ if (present_tableName)
+ list.add(tableName);
+
+ boolean present_partitions = true && (isSetPartitions());
+ list.add(present_partitions);
+ if (present_partitions)
+ list.add(partitions);
+
+ boolean present_environmentContext = true && (isSetEnvironmentContext());
+ list.add(present_environmentContext);
+ if (present_environmentContext)
+ list.add(environmentContext);
+
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(AlterPartitionsRequest other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetDbName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTableName()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetPartitions()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetEnvironmentContext()).compareTo(other.isSetEnvironmentContext());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetEnvironmentContext()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environmentContext, other.environmentContext);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("AlterPartitionsRequest(");
+ boolean first = true;
+
+ sb.append("dbName:");
+ if (this.dbName == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.dbName);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("tableName:");
+ if (this.tableName == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.tableName);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("partitions:");
+ if (this.partitions == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.partitions);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("environmentContext:");
+ if (this.environmentContext == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.environmentContext);
+ }
+ first = false;
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ if (!isSetDbName()) {
+ throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+ }
+
+ if (!isSetTableName()) {
+ throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
+ }
+
+ if (!isSetPartitions()) {
+ throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitions' is unset! Struct:" + toString());
+ }
+
+ if (!isSetEnvironmentContext()) {
+ throw new org.apache.thrift.protocol.TProtocolException("Required field 'environmentContext' is unset! Struct:" + toString());
+ }
+
+ // check for sub-struct validity
+ if (environmentContext != null) {
+ environmentContext.validate();
+ }
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ // it doesn't seem like you should have to do this, but Java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class AlterPartitionsRequestStandardSchemeFactory implements SchemeFactory {
+ public AlterPartitionsRequestStandardScheme getScheme() {
+ return new AlterPartitionsRequestStandardScheme();
+ }
+ }
+
+ private static class AlterPartitionsRequestStandardScheme extends StandardScheme<AlterPartitionsRequest> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ case 1: // DB_NAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.dbName = iprot.readString();
+ struct.setDbNameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 2: // TABLE_NAME
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.tableName = iprot.readString();
+ struct.setTableNameIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 3: // PARTITIONS
+ if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+ {
+ org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
+ struct.partitions = new ArrayList<Partition>(_list928.size);
+ Partition _elem929;
+ for (int _i930 = 0; _i930 < _list928.size; ++_i930)
+ {
+ _elem929 = new Partition();
+ _elem929.read(iprot);
+ struct.partitions.add(_elem929);
+ }
+ iprot.readListEnd();
+ }
+ struct.setPartitionsIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 4: // ENVIRONMENT_CONTEXT
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+ struct.environmentContext = new EnvironmentContext();
+ struct.environmentContext.read(iprot);
+ struct.setEnvironmentContextIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 5: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 6: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (struct.dbName != null) {
+ oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+ oprot.writeString(struct.dbName);
+ oprot.writeFieldEnd();
+ }
+ if (struct.tableName != null) {
+ oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+ oprot.writeString(struct.tableName);
+ oprot.writeFieldEnd();
+ }
+ if (struct.partitions != null) {
+ oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
+ {
+ oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
+ for (Partition _iter931 : struct.partitions)
+ {
+ _iter931.write(oprot);
+ }
+ oprot.writeListEnd();
+ }
+ oprot.writeFieldEnd();
+ }
+ if (struct.environmentContext != null) {
+ oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC);
+ struct.environmentContext.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class AlterPartitionsRequestTupleSchemeFactory implements SchemeFactory {
+ public AlterPartitionsRequestTupleScheme getScheme() {
+ return new AlterPartitionsRequestTupleScheme();
+ }
+ }
+
+ private static class AlterPartitionsRequestTupleScheme extends TupleScheme<AlterPartitionsRequest> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ oprot.writeString(struct.dbName);
+ oprot.writeString(struct.tableName);
+ {
+ oprot.writeI32(struct.partitions.size());
+ for (Partition _iter932 : struct.partitions)
+ {
+ _iter932.write(oprot);
+ }
+ }
+ struct.environmentContext.write(oprot);
+ BitSet optionals = new BitSet();
+ if (struct.isSetTxnId()) {
+ optionals.set(0);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(1);
+ }
+ oprot.writeBitSet(optionals, 2);
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ struct.dbName = iprot.readString();
+ struct.setDbNameIsSet(true);
+ struct.tableName = iprot.readString();
+ struct.setTableNameIsSet(true);
+ {
+ org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.partitions = new ArrayList<Partition>(_list933.size);
+ Partition _elem934;
+ for (int _i935 = 0; _i935 < _list933.size; ++_i935)
+ {
+ _elem934 = new Partition();
+ _elem934.read(iprot);
+ struct.partitions.add(_elem934);
+ }
+ }
+ struct.setPartitionsIsSet(true);
+ struct.environmentContext = new EnvironmentContext();
+ struct.environmentContext.read(iprot);
+ struct.setEnvironmentContextIsSet(true);
+ BitSet incoming = iprot.readBitSet(2);
+ if (incoming.get(0)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ }
+ }
+
+}
+
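
A minimal client-side sketch of the new request type (the database, table, and
write-id values below are illustrative, not from this commit): the four
required fields go through the generated constructor, and the new optional
transaction context goes through the setters, which flip the isset state the
serializers above check.

    // Sketch only; assumes java.util.ArrayList is imported.
    AlterPartitionsRequest req = new AlterPartitionsRequest(
        "default",                       // dbName, required
        "web_logs",                      // tableName, required (illustrative)
        new ArrayList<Partition>(),      // partitions, required (must be non-null)
        new EnvironmentContext());       // environmentContext, required
    req.setTxnId(42L);                   // optional; stays -1L and unset otherwise
    req.setValidWriteIdList("default.web_logs:5::");  // opaque snapshot string, illustrative
    req.validate();                      // TProtocolException if a required field is unset
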
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java
new file mode 100644
index 0000000..8e03462
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsResponse implements org.apache.thrift.TBase<AlterPartitionsResponse, AlterPartitionsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<AlterPartitionsResponse> {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsResponse");
+
+
+ private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+ static {
+ schemes.put(StandardScheme.class, new AlterPartitionsResponseStandardSchemeFactory());
+ schemes.put(TupleScheme.class, new AlterPartitionsResponseTupleSchemeFactory());
+ }
+
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+ private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsResponse.class, metaDataMap);
+ }
+
+ public AlterPartitionsResponse() {
+ }
+
+ /**
+ * Performs a deep copy on <i>other</i>.
+ */
+ public AlterPartitionsResponse(AlterPartitionsResponse other) {
+ }
+
+ public AlterPartitionsResponse deepCopy() {
+ return new AlterPartitionsResponse(this);
+ }
+
+ @Override
+ public void clear() {
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof AlterPartitionsResponse)
+ return this.equals((AlterPartitionsResponse)that);
+ return false;
+ }
+
+ public boolean equals(AlterPartitionsResponse that) {
+ if (that == null)
+ return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ List<Object> list = new ArrayList<Object>();
+
+ return list.hashCode();
+ }
+
+ @Override
+ public int compareTo(AlterPartitionsResponse other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("AlterPartitionsResponse(");
+ boolean first = true;
+
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ // check for sub-struct validity
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private static class AlterPartitionsResponseStandardSchemeFactory implements SchemeFactory {
+ public AlterPartitionsResponseStandardScheme getScheme() {
+ return new AlterPartitionsResponseStandardScheme();
+ }
+ }
+
+ private static class AlterPartitionsResponseStandardScheme extends StandardScheme<AlterPartitionsResponse> {
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField schemeField;
+ iprot.readStructBegin();
+ while (true)
+ {
+ schemeField = iprot.readFieldBegin();
+ if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (schemeField.id) {
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+ struct.validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
+ struct.validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ }
+
+ private static class AlterPartitionsResponseTupleSchemeFactory implements SchemeFactory {
+ public AlterPartitionsResponseTupleScheme getScheme() {
+ return new AlterPartitionsResponseTupleScheme();
+ }
+ }
+
+ private static class AlterPartitionsResponseTupleScheme extends TupleScheme<AlterPartitionsResponse> {
+
+ @Override
+ public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol oprot = (TTupleProtocol) prot;
+ }
+
+ @Override
+ public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
+ TTupleProtocol iprot = (TTupleProtocol) prot;
+ }
+ }
+
+}
+
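
AlterPartitionsResponse carries no fields, but it still round-trips through a
Thrift protocol like any other struct; a sketch over an in-memory transport
(helper name and buffer size are illustrative):

    static AlterPartitionsResponse roundTrip(AlterPartitionsResponse resp)
        throws org.apache.thrift.TException {
      // write() emits only the struct frame and a field-stop for the empty struct
      org.apache.thrift.transport.TMemoryBuffer buf =
          new org.apache.thrift.transport.TMemoryBuffer(64);
      resp.write(new org.apache.thrift.protocol.TCompactProtocol(buf));
      AlterPartitionsResponse echoed = new AlterPartitionsResponse();
      echoed.read(new org.apache.thrift.protocol.TCompactProtocol(buf));
      return echoed;  // equals(resp) is trivially true: no fields to compare
    }
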
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
index 6ce7214..9fd43cc 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
@@ -40,6 +40,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)5);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -49,11 +52,21 @@ import org.slf4j.LoggerFactory;
private ColumnStatisticsDesc statsDesc; // required
private List<ColumnStatisticsObj> statsObj; // required
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
STATS_DESC((short)1, "statsDesc"),
- STATS_OBJ((short)2, "statsObj");
+ STATS_OBJ((short)2, "statsObj"),
+ TXN_ID((short)3, "txnId"),
+ VALID_WRITE_ID_LIST((short)4, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)5, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -72,6 +85,12 @@ import org.slf4j.LoggerFactory;
return STATS_DESC;
case 2: // STATS_OBJ
return STATS_OBJ;
+ case 3: // TXN_ID
+ return TXN_ID;
+ case 4: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 5: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -112,6 +131,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -120,11 +142,19 @@ import org.slf4j.LoggerFactory;
tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("statsObj", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap);
}
public ColumnStatistics() {
+ this.txnId = -1L;
+
}
public ColumnStatistics(
@@ -140,6 +170,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public ColumnStatistics(ColumnStatistics other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetStatsDesc()) {
this.statsDesc = new ColumnStatisticsDesc(other.statsDesc);
}
@@ -150,6 +181,13 @@ import org.slf4j.LoggerFactory;
}
this.statsObj = __this__statsObj;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public ColumnStatistics deepCopy() {
@@ -160,6 +198,10 @@ import org.slf4j.LoggerFactory;
public void clear() {
this.statsDesc = null;
this.statsObj = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public ColumnStatisticsDesc getStatsDesc() {
@@ -223,6 +265,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case STATS_DESC:
@@ -241,6 +359,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -252,6 +394,15 @@ import org.slf4j.LoggerFactory;
case STATS_OBJ:
return getStatsObj();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -267,6 +418,12 @@ import org.slf4j.LoggerFactory;
return isSetStatsDesc();
case STATS_OBJ:
return isSetStatsObj();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -302,6 +459,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -319,6 +503,21 @@ import org.slf4j.LoggerFactory;
if (present_statsObj)
list.add(statsObj);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -350,6 +549,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -385,6 +614,32 @@ import org.slf4j.LoggerFactory;
sb.append(this.statsObj);
}
first = false;
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -415,6 +670,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but Java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -467,6 +724,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 4: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 5: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -497,6 +778,25 @@ import org.slf4j.LoggerFactory;
}
oprot.writeFieldEnd();
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -522,6 +822,26 @@ import org.slf4j.LoggerFactory;
_iter272.write(oprot);
}
}
+ BitSet optionals = new BitSet();
+ if (struct.isSetTxnId()) {
+ optionals.set(0);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(1);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -542,6 +862,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setStatsObjIsSet(true);
+ BitSet incoming = iprot.readBitSet(3);
+ if (incoming.get(0)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(1)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
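
Given an already-populated ColumnStatistics instance, the three new optional
fields follow the same setter pattern as the existing ones; a sketch (values
illustrative, and the IsolationLevelCompliance constant is an assumption, not
taken from this diff):

    stats.setTxnId(42L);                               // tracked in __isset_bitfield; -1L when unset
    stats.setValidWriteIdList("default.web_logs:5::"); // writer's snapshot, opaque string here
    stats.setIsStatsCompliant(IsolationLevelCompliance.YES);  // assumed enum constant
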
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
index 3c88d8f..821049e 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
@@ -42,6 +42,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -53,13 +55,17 @@ import org.slf4j.LoggerFactory;
private String tblName; // required
private ClientCapabilities capabilities; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
DB_NAME((short)1, "dbName"),
TBL_NAME((short)2, "tblName"),
CAPABILITIES((short)3, "capabilities"),
- CAT_NAME((short)4, "catName");
+ CAT_NAME((short)4, "catName"),
+ TXN_ID((short)5, "txnId"),
+ VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -82,6 +88,10 @@ import org.slf4j.LoggerFactory;
return CAPABILITIES;
case 4: // CAT_NAME
return CAT_NAME;
+ case 5: // TXN_ID
+ return TXN_ID;
+ case 6: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -122,7 +132,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -134,11 +146,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap);
}
public GetTableRequest() {
+ this.txnId = -1L;
+
}
public GetTableRequest(
@@ -154,6 +172,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public GetTableRequest(GetTableRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -166,6 +185,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public GetTableRequest deepCopy() {
@@ -178,6 +201,9 @@ import org.slf4j.LoggerFactory;
this.tblName = null;
this.capabilities = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -272,6 +298,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -306,6 +377,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -323,6 +410,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -342,6 +435,10 @@ import org.slf4j.LoggerFactory;
return isSetCapabilities();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -395,6 +492,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -422,6 +537,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -473,6 +598,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -528,6 +673,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -558,6 +719,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but Java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -615,6 +778,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 5: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 6: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -652,6 +831,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -678,13 +869,25 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetTxnId()) {
+ optionals.set(2);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
if (struct.isSetCapabilities()) {
struct.capabilities.write(oprot);
}
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -694,7 +897,7 @@ import org.slf4j.LoggerFactory;
struct.setDbNameIsSet(true);
struct.tblName = iprot.readString();
struct.setTblNameIsSet(true);
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
struct.capabilities = new ClientCapabilities();
struct.capabilities.read(iprot);
@@ -704,6 +907,14 @@ import org.slf4j.LoggerFactory;
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(2)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(3)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
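For reference, a minimal usage sketch of the two fields this hunk adds to the request struct (the class name is not visible in the hunk; the dbName/tblName/capabilities/catName field set matches GetTableRequest). The constructor arguments and the ValidWriteIdList string are illustrative assumptions, not taken from this patch.

  // assumes: import org.apache.hadoop.hive.metastore.api.GetTableRequest;
  GetTableRequest req = new GetTableRequest("default", "web_logs"); // required dbName, tblName
  // both new fields are optional and start out unset
  boolean anySet = req.isSetTxnId() || req.isSetValidWriteIdList(); // false
  req.setTxnId(42L);                                                // sets the TXN_ID bit in __isset_bitfield
  req.setValidWriteIdList("default.web_logs:5:9223372036854775807::"); // illustrative format only
  // unset fields are skipped by the field-based writer above, so older readers never see them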
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
index 968e250..80aff92 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableResult");
private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private Table table; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- TABLE((short)1, "table");
+ TABLE((short)1, "table"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // TABLE
return TABLE;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,11 +116,14 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableResult.class, metaDataMap);
}
@@ -133,6 +145,9 @@ import org.slf4j.LoggerFactory;
if (other.isSetTable()) {
this.table = new Table(other.table);
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public GetTableResult deepCopy() {
@@ -142,6 +157,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.table = null;
+ this.isStatsCompliant = null;
}
public Table getTable() {
@@ -167,6 +183,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE:
@@ -177,6 +224,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -185,6 +240,9 @@ import org.slf4j.LoggerFactory;
case TABLE:
return getTable();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -198,6 +256,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case TABLE:
return isSetTable();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -224,6 +284,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -236,6 +305,11 @@ import org.slf4j.LoggerFactory;
if (present_table)
list.add(table);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -257,6 +331,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -284,6 +368,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.table);
}
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -343,6 +437,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -361,6 +463,13 @@ import org.slf4j.LoggerFactory;
struct.table.write(oprot);
oprot.writeFieldEnd();
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -379,6 +488,14 @@ import org.slf4j.LoggerFactory;
public void write(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException {
TTupleProtocol oprot = (TTupleProtocol) prot;
struct.table.write(oprot);
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -387,6 +504,11 @@ import org.slf4j.LoggerFactory;
struct.table = new Table();
struct.table.read(iprot);
struct.setTableIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
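A short sketch of how the new isStatsCompliant field behaves on GetTableResult, assuming the usual generated constructor for the required table field; the Table value is a placeholder for illustration.

  // assumes: import org.apache.hadoop.hive.metastore.api.*;
  Table t = new Table();                        // placeholder, not from this patch
  GetTableResult res = new GetTableResult(t);   // table is the only required field
  // isStatsCompliant is optional: both "never set" and an unrecognized wire value
  // (findByValue returning null in the reader above) leave it null
  if (res.isSetIsStatsCompliant()
      && res.getIsStatsCompliant() == IsolationLevelCompliance.YES) {
    // column statistics are valid for the caller's snapshot
  }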
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
new file mode 100644
index 0000000..cb2559f
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IsolationLevelCompliance.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum IsolationLevelCompliance implements org.apache.thrift.TEnum {
+ YES(1),
+ NO(2),
+ UNKNOWN(3);
+
+ private final int value;
+
+ private IsolationLevelCompliance(int value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the integer value of this enum value, as defined in the Thrift IDL.
+ */
+ public int getValue() {
+ return value;
+ }
+
+ /**
+ * Find the enum type by its integer value, as defined in the Thrift IDL.
+ * @return null if the value is not found.
+ */
+ public static IsolationLevelCompliance findByValue(int value) {
+ switch (value) {
+ case 1:
+ return YES;
+ case 2:
+ return NO;
+ case 3:
+ return UNKNOWN;
+ default:
+ return null;
+ }
+ }
+}
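IsolationLevelCompliance is a plain int-backed Thrift enum; a two-line sketch of the lookup the deserializers in this patch rely on:

  IsolationLevelCompliance ok  = IsolationLevelCompliance.findByValue(3); // UNKNOWN
  IsolationLevelCompliance bad = IsolationLevelCompliance.findByValue(7); // null: unrecognized wire value

Because findByValue returns null rather than throwing, readers silently drop values written by a newer schema.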
[59/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
index 51f809a..5b40d2f 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@ -47,6 +47,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7);
private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)10);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)11);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)12);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -63,6 +66,9 @@ import org.slf4j.LoggerFactory;
private Map<String,String> parameters; // required
private PrincipalPrivilegeSet privileges; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -74,7 +80,14 @@ import org.slf4j.LoggerFactory;
SD((short)6, "sd"),
PARAMETERS((short)7, "parameters"),
PRIVILEGES((short)8, "privileges"),
- CAT_NAME((short)9, "catName");
+ CAT_NAME((short)9, "catName"),
+ TXN_ID((short)10, "txnId"),
+ VALID_WRITE_ID_LIST((short)11, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)12, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -107,6 +120,12 @@ import org.slf4j.LoggerFactory;
return PRIVILEGES;
case 9: // CAT_NAME
return CAT_NAME;
+ case 10: // TXN_ID
+ return TXN_ID;
+ case 11: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 12: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -149,8 +168,9 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __CREATETIME_ISSET_ID = 0;
private static final int __LASTACCESSTIME_ISSET_ID = 1;
+ private static final int __TXNID_ISSET_ID = 2;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME};
+ private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -175,11 +195,19 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap);
}
public Partition() {
+ this.txnId = -1L;
+
}
public Partition(
@@ -233,6 +261,13 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName);
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public Partition deepCopy() {
@@ -252,6 +287,10 @@ import org.slf4j.LoggerFactory;
this.parameters = null;
this.privileges = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public int getValuesSize() {
@@ -485,6 +524,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case VALUES:
@@ -559,6 +674,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -591,6 +730,15 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -620,6 +768,12 @@ import org.slf4j.LoggerFactory;
return isSetPrivileges();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -718,6 +872,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -770,6 +951,21 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -871,6 +1067,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -958,6 +1184,32 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1105,6 +1357,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 10: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 11: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 12: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -1178,6 +1454,25 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1223,7 +1518,16 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(8);
}
- oprot.writeBitSet(optionals, 9);
+ if (struct.isSetTxnId()) {
+ optionals.set(9);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(10);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(11);
+ }
+ oprot.writeBitSet(optionals, 12);
if (struct.isSetValues()) {
{
oprot.writeI32(struct.values.size());
@@ -1264,12 +1568,21 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(9);
+ BitSet incoming = iprot.readBitSet(12);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
@@ -1328,6 +1641,18 @@ import org.slf4j.LoggerFactory;
struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
struct.setCatNameIsSet(true);
}
+ if (incoming.get(9)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(10)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(11)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
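A small sketch of the new Partition defaults, based only on the generated code above (the -1L txnId initialization and the isset bitfield):

  Partition p = new Partition();     // constructor assigns txnId = -1L but leaves the isset bit clear
  long id = p.getTxnId();            // -1
  boolean set = p.isSetTxnId();      // false: -1 is only a sentinel, not a set value
  p.setTxnId(100L);                  // now isSetTxnId() == true and the field is serialized
  p.setIsStatsCompliant(IsolationLevelCompliance.UNKNOWN); // optional enum, null when absent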
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
index 247fdaa..bc625b0 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
@@ -44,6 +44,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4);
private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)9);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -57,6 +60,9 @@ import org.slf4j.LoggerFactory;
private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional
private PartitionListComposingSpec partitionList; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -65,7 +71,14 @@ import org.slf4j.LoggerFactory;
ROOT_PATH((short)3, "rootPath"),
SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"),
PARTITION_LIST((short)5, "partitionList"),
- CAT_NAME((short)6, "catName");
+ CAT_NAME((short)6, "catName"),
+ TXN_ID((short)7, "txnId"),
+ VALID_WRITE_ID_LIST((short)8, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)9, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -92,6 +105,12 @@ import org.slf4j.LoggerFactory;
return PARTITION_LIST;
case 6: // CAT_NAME
return CAT_NAME;
+ case 7: // TXN_ID
+ return TXN_ID;
+ case 8: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 9: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -132,7 +151,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -148,11 +169,19 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap);
}
public PartitionSpec() {
+ this.txnId = -1L;
+
}
public PartitionSpec(
@@ -170,6 +199,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public PartitionSpec(PartitionSpec other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -188,6 +218,13 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public PartitionSpec deepCopy() {
@@ -202,6 +239,10 @@ import org.slf4j.LoggerFactory;
this.sharedSDPartitionSpec = null;
this.partitionList = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public String getDbName() {
@@ -342,6 +383,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -392,6 +509,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -415,6 +556,15 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -438,6 +588,12 @@ import org.slf4j.LoggerFactory;
return isSetPartitionList();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -509,6 +665,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -546,6 +729,21 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -617,6 +815,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -690,6 +918,32 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -715,6 +969,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -789,6 +1045,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 7: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 8: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 9: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -838,6 +1118,25 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -874,7 +1173,16 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(5);
}
- oprot.writeBitSet(optionals, 6);
+ if (struct.isSetTxnId()) {
+ optionals.set(6);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(7);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(8);
+ }
+ oprot.writeBitSet(optionals, 9);
if (struct.isSetDbName()) {
oprot.writeString(struct.dbName);
}
@@ -893,12 +1201,21 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(6);
+ BitSet incoming = iprot.readBitSet(9);
if (incoming.get(0)) {
struct.dbName = iprot.readString();
struct.setDbNameIsSet(true);
@@ -925,6 +1242,18 @@ import org.slf4j.LoggerFactory;
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(6)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(7)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(8)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
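PartitionSpec previously had no primitive optional fields, so this hunk also introduces its first __isset_bitfield; a sketch of how that interacts with the generated equals(), assuming the remaining (unset) fields compare as absent on both sides:

  PartitionSpec a = new PartitionSpec();
  PartitionSpec b = new PartitionSpec();
  a.setTxnId(7L);
  boolean same = a.equals(b);        // false: txnId is set on only one side
  b.setTxnId(7L);
  same = a.equals(b);                // true, assuming all other fields match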
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
index 91cf567..a298b89 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@ -43,6 +43,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -55,6 +57,8 @@ import org.slf4j.LoggerFactory;
private List<String> colNames; // required
private List<String> partNames; // required
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -62,7 +66,9 @@ import org.slf4j.LoggerFactory;
TBL_NAME((short)2, "tblName"),
COL_NAMES((short)3, "colNames"),
PART_NAMES((short)4, "partNames"),
- CAT_NAME((short)5, "catName");
+ CAT_NAME((short)5, "catName"),
+ TXN_ID((short)6, "txnId"),
+ VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -87,6 +93,10 @@ import org.slf4j.LoggerFactory;
return PART_NAMES;
case 5: // CAT_NAME
return CAT_NAME;
+ case 6: // TXN_ID
+ return TXN_ID;
+ case 7: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -127,7 +137,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -143,11 +155,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap);
}
public PartitionsStatsRequest() {
+ this.txnId = -1L;
+
}
public PartitionsStatsRequest(
@@ -167,6 +185,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public PartitionsStatsRequest(PartitionsStatsRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -184,6 +203,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public PartitionsStatsRequest deepCopy() {
@@ -197,6 +220,9 @@ import org.slf4j.LoggerFactory;
this.colNames = null;
this.partNames = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -344,6 +370,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -386,6 +457,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -406,6 +493,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -427,6 +520,10 @@ import org.slf4j.LoggerFactory;
return isSetPartNames();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -489,6 +586,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -521,6 +636,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -582,6 +707,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -643,6 +788,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -678,6 +839,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -762,6 +925,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 6: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 7: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -816,6 +995,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -853,10 +1044,22 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetTxnId()) {
+ optionals.set(1);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -888,11 +1091,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setPartNamesIsSet(true);
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
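The widened bit set above (writeBitSet(optionals, 3) / readBitSet(3)) is the whole compatibility story for the two new optional fields: each optional gets one flag bit, and only fields whose bit is set are written or read. A minimal, self-contained sketch of that pattern follows, using only java.util.BitSet with simplified writer/reader logic; the field names mirror the diff, everything else is illustrative and not the generated Thrift code itself.

import java.util.BitSet;

public class OptionalFieldBitSetSketch {
    public static void main(String[] args) {
        boolean isSetCatName = true;
        boolean isSetTxnId = true;
        boolean isSetValidWriteIdList = false;

        // Writer side: one bit per optional field, in field-id order.
        BitSet optionals = new BitSet();
        if (isSetCatName)          optionals.set(0);
        if (isSetTxnId)            optionals.set(1);
        if (isSetValidWriteIdList) optionals.set(2);
        // writeBitSet(optionals, 3) sends these 3 flag bits; the reader must
        // call readBitSet(3) with the same count, which is why both sides change together.

        // Reader side: only fields whose bit is set are pulled off the stream.
        if (optionals.get(1)) {
            long txnId = 42L; // stands in for iprot.readI64() in the generated code
            System.out.println("txnId present: " + txnId);
        }
        if (!optionals.get(2)) {
            System.out.println("validWriteIdList absent; field stays unset");
        }
    }
}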
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
index 4caec8f..2414399 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult");
private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private Map<String,List<ColumnStatisticsObj>> partStats; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- PART_STATS((short)1, "partStats");
+ PART_STATS((short)1, "partStats"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // PART_STATS
return PART_STATS;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,6 +116,7 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -115,6 +125,8 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING),
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsResult.class, metaDataMap);
}
@@ -151,6 +163,9 @@ import org.slf4j.LoggerFactory;
}
this.partStats = __this__partStats;
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public PartitionsStatsResult deepCopy() {
@@ -160,6 +175,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.partStats = null;
+ this.isStatsCompliant = null;
}
public int getPartStatsSize() {
@@ -196,6 +212,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case PART_STATS:
@@ -206,6 +253,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -214,6 +269,9 @@ import org.slf4j.LoggerFactory;
case PART_STATS:
return getPartStats();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -227,6 +285,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case PART_STATS:
return isSetPartStats();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -253,6 +313,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -265,6 +334,11 @@ import org.slf4j.LoggerFactory;
if (present_partStats)
list.add(partStats);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -286,6 +360,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -313,6 +397,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.partStats);
}
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -391,6 +485,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -424,6 +526,13 @@ import org.slf4j.LoggerFactory;
}
oprot.writeFieldEnd();
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -455,6 +564,14 @@ import org.slf4j.LoggerFactory;
}
}
}
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -483,6 +600,11 @@ import org.slf4j.LoggerFactory;
}
}
struct.setPartStatsIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
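isStatsCompliant is an enum-typed optional, so it travels on the wire as an I32 via getValue() and is decoded with findByValue(), which returns null for ids the reader does not recognize. A small sketch of that convention with a placeholder enum (the names and values below are not the real IsolationLevelCompliance members):

public class EnumWireSketch {
    enum Compliance {
        YES(1), NO(2), UNKNOWN(3);

        private final int value;
        Compliance(int value) { this.value = value; }
        public int getValue() { return value; }

        // Mirrors the generated findByValue(int): returns null for unknown ids,
        // so an old reader can skip values written by a newer writer.
        public static Compliance findByValue(int value) {
            for (Compliance c : values()) {
                if (c.value == value) return c;
            }
            return null;
        }
    }

    public static void main(String[] args) {
        int onWire = Compliance.YES.getValue();          // what writeI32(...) would send
        System.out.println(Compliance.findByValue(onWire)); // YES
        System.out.println(Compliance.findByValue(99));      // null -> treated as unset/skipped
    }
}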
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
index a0ae84e..8f46012 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
@@ -40,6 +40,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
private static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -49,11 +51,15 @@ import org.slf4j.LoggerFactory;
private List<ColumnStatistics> colStats; // required
private boolean needMerge; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
COL_STATS((short)1, "colStats"),
- NEED_MERGE((short)2, "needMerge");
+ NEED_MERGE((short)2, "needMerge"),
+ TXN_ID((short)3, "txnId"),
+ VALID_WRITE_ID_LIST((short)4, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -72,6 +78,10 @@ import org.slf4j.LoggerFactory;
return COL_STATS;
case 2: // NEED_MERGE
return NEED_MERGE;
+ case 3: // TXN_ID
+ return TXN_ID;
+ case 4: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -113,8 +123,9 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __NEEDMERGE_ISSET_ID = 0;
+ private static final int __TXNID_ISSET_ID = 1;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.NEED_MERGE};
+ private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -123,11 +134,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))));
tmpMap.put(_Fields.NEED_MERGE, new org.apache.thrift.meta_data.FieldMetaData("needMerge", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap);
}
public SetPartitionsStatsRequest() {
+ this.txnId = -1L;
+
}
public SetPartitionsStatsRequest(
@@ -150,6 +167,10 @@ import org.slf4j.LoggerFactory;
this.colStats = __this__colStats;
}
this.needMerge = other.needMerge;
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public SetPartitionsStatsRequest deepCopy() {
@@ -161,6 +182,9 @@ import org.slf4j.LoggerFactory;
this.colStats = null;
setNeedMergeIsSet(false);
this.needMerge = false;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public int getColStatsSize() {
@@ -223,6 +247,51 @@ import org.slf4j.LoggerFactory;
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDMERGE_ISSET_ID, value);
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case COL_STATS:
@@ -241,6 +310,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -252,6 +337,12 @@ import org.slf4j.LoggerFactory;
case NEED_MERGE:
return isNeedMerge();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -267,6 +358,10 @@ import org.slf4j.LoggerFactory;
return isSetColStats();
case NEED_MERGE:
return isSetNeedMerge();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -302,6 +397,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -319,6 +432,16 @@ import org.slf4j.LoggerFactory;
if (present_needMerge)
list.add(needMerge);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -350,6 +473,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -383,6 +526,22 @@ import org.slf4j.LoggerFactory;
sb.append(this.needMerge);
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -459,6 +618,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 4: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -489,6 +664,18 @@ import org.slf4j.LoggerFactory;
oprot.writeBool(struct.needMerge);
oprot.writeFieldEnd();
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -517,10 +704,22 @@ import org.slf4j.LoggerFactory;
if (struct.isSetNeedMerge()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetTxnId()) {
+ optionals.set(1);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetNeedMerge()) {
oprot.writeBool(struct.needMerge);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -538,11 +737,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setColStatsIsSet(true);
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.needMerge = iprot.readBool();
struct.setNeedMergeIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
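SetPartitionsStatsRequest mixes the two optional-field bookkeeping styles visible above: the primitive txnId is tracked in __isset_bitfield (so its -1L default does not count as "set"), while the String validWriteIdList is tracked simply by null. A stripped-down sketch of that distinction, with the bit operations written out instead of calling EncodingUtils; class and constant names here are illustrative only.

public class IssetBitfieldSketch {
    private static final int TXNID_ISSET_ID = 1;

    private byte issetBitfield = 0;
    private long txnId = -1L;            // default mirrors the generated constructor
    private String validWriteIdList;     // reference-typed optionals just use null

    void setTxnId(long txnId) {
        this.txnId = txnId;
        issetBitfield |= (1 << TXNID_ISSET_ID);   // EncodingUtils.setBit(..., true)
    }

    boolean isSetTxnId() {
        return (issetBitfield & (1 << TXNID_ISSET_ID)) != 0;   // EncodingUtils.testBit
    }

    boolean isSetValidWriteIdList() {
        return validWriteIdList != null;  // no bitfield needed for reference types
    }

    public static void main(String[] args) {
        IssetBitfieldSketch req = new IssetBitfieldSketch();
        System.out.println(req.isSetTxnId());   // false, even though txnId == -1L
        req.setTxnId(100L);
        System.out.println(req.isSetTxnId());   // true
    }
}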
[46/67] [abbrv] hive git commit: HIVE-19602: Refactor inplace
progress code in Hive-on-spark progress monitor to use ProgressMonitor
instance (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar, Rui Li)
Posted by se...@apache.org.
HIVE-19602: Refactor inplace progress code in Hive-on-spark progress monitor to use ProgressMonitor instance (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar, Rui Li)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c89cf6d5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c89cf6d5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c89cf6d5
Branch: refs/heads/master-txnstats
Commit: c89cf6d5de0343493dc629a0073b5c8e88359a6e
Parents: 3a6ad26
Author: Bharathkrishna Guruvayoor Murali <bh...@cloudera.com>
Authored: Mon Jun 18 10:03:01 2018 -0500
Committer: Sahil Takiar <st...@cloudera.com>
Committed: Mon Jun 18 10:03:01 2018 -0500
----------------------------------------------------------------------
.../ql/exec/spark/status/SparkJobMonitor.java | 166 +------------------
.../exec/spark/status/SparkProgressMonitor.java | 155 +++++++++++++++++
2 files changed, 160 insertions(+), 161 deletions(-)
----------------------------------------------------------------------
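The shape of the refactor: SparkJobMonitor no longer formats the in-place table itself; it builds a progress snapshot and hands it to InPlaceUpdate for rendering. The sketch below shows that separation with a subset of the methods SparkProgressMonitor overrides in the diff that follows; the interface and renderer here are stand-ins, not the real org.apache.hadoop.hive.common.log classes.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ProgressMonitorShapeSketch {

    // Subset of the accessors SparkProgressMonitor implements below.
    interface MonitorView {
        List<String> headers();
        List<List<String>> rows();
        String footerSummary();
        String executionStatus();
        double progressedPercentage();
    }

    // Stand-in renderer: the real code passes the view to InPlaceUpdate.render(...).
    static void render(MonitorView view) {
        System.out.println(String.join("  ", view.headers()));
        for (List<String> row : view.rows()) {
            System.out.println(String.join("  ", row));
        }
        System.out.printf("%s  %.0f%%  %s%n",
                view.footerSummary(), view.progressedPercentage() * 100, view.executionStatus());
    }

    public static void main(String[] args) {
        MonitorView view = new MonitorView() {
            public List<String> headers() { return Arrays.asList("STAGES", "STATUS", "TOTAL", "COMPLETED"); }
            public List<List<String>> rows() { return Collections.singletonList(Arrays.asList("Stage-0", "RUNNING", "4", "3")); }
            public String footerSummary() { return "STAGES: 00/01"; }
            public String executionStatus() { return "RUNNING"; }
            public double progressedPercentage() { return 0.75; }
        };
        render(view);
    }
}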
http://git-wip-us.apache.org/repos/asf/hive/blob/c89cf6d5/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
index e78b1cd..3531ac2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkJobMonitor.java
@@ -22,13 +22,9 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.common.log.InPlaceUpdate;
import org.apache.hadoop.hive.ql.log.PerfLogger;
import org.apache.hadoop.hive.ql.session.SessionState;
-import org.fusesource.jansi.Ansi;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.PrintStream;
-import java.text.DecimalFormat;
-import java.text.NumberFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashSet;
@@ -38,8 +34,6 @@ import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
-import static org.fusesource.jansi.Ansi.ansi;
-
abstract class SparkJobMonitor {
protected static final String CLASS_NAME = SparkJobMonitor.class.getName();
@@ -48,6 +42,7 @@ abstract class SparkJobMonitor {
protected final PerfLogger perfLogger = SessionState.getPerfLogger();
protected final int checkInterval = 1000;
protected final long monitorTimeoutInterval;
+ private final InPlaceUpdate inPlaceUpdateFn;
private final Set<String> completed = new HashSet<String>();
private final int printInterval = 3000;
@@ -61,94 +56,20 @@ abstract class SparkJobMonitor {
FINISHED
}
- // in-place progress update related variables
protected final boolean inPlaceUpdate;
- private int lines = 0;
- private final PrintStream out;
-
- private static final int COLUMN_1_WIDTH = 16;
- private static final String HEADER_FORMAT = "%16s%10s %13s %5s %9s %7s %7s %6s ";
- private static final String STAGE_FORMAT = "%-16s%10s %13s %5s %9s %7s %7s %6s ";
- private static final String HEADER = String.format(HEADER_FORMAT,
- "STAGES", "ATTEMPT", "STATUS", "TOTAL", "COMPLETED", "RUNNING", "PENDING", "FAILED");
- private static final int SEPARATOR_WIDTH = 86;
- private static final String SEPARATOR = new String(new char[SEPARATOR_WIDTH]).replace("\0", "-");
- private static final String FOOTER_FORMAT = "%-15s %-30s %-4s %-25s";
- private static final int progressBarChars = 30;
-
- private final NumberFormat secondsFormat = new DecimalFormat("#0.00");
protected SparkJobMonitor(HiveConf hiveConf) {
monitorTimeoutInterval = hiveConf.getTimeVar(
HiveConf.ConfVars.SPARK_JOB_MONITOR_TIMEOUT, TimeUnit.SECONDS);
inPlaceUpdate = InPlaceUpdate.canRenderInPlace(hiveConf) && !SessionState.getConsole().getIsSilent();
console = new SessionState.LogHelper(LOG);
- out = SessionState.LogHelper.getInfoStream();
+ inPlaceUpdateFn = new InPlaceUpdate(SessionState.LogHelper.getInfoStream());
}
public abstract int startMonitor();
private void printStatusInPlace(Map<SparkStage, SparkStageProgress> progressMap) {
-
- StringBuilder reportBuffer = new StringBuilder();
-
- // Num of total and completed tasks
- int sumTotal = 0;
- int sumComplete = 0;
-
- // position the cursor to line 0
- repositionCursor();
-
- // header
- reprintLine(SEPARATOR);
- reprintLineWithColorAsBold(HEADER, Ansi.Color.CYAN);
- reprintLine(SEPARATOR);
-
- SortedSet<SparkStage> keys = new TreeSet<SparkStage>(progressMap.keySet());
- int idx = 0;
- final int numKey = keys.size();
- for (SparkStage stage : keys) {
- SparkStageProgress progress = progressMap.get(stage);
- final int complete = progress.getSucceededTaskCount();
- final int total = progress.getTotalTaskCount();
- final int running = progress.getRunningTaskCount();
- final int failed = progress.getFailedTaskCount();
- sumTotal += total;
- sumComplete += complete;
-
- String s = stage.toString();
- StageState state = total > 0 ? StageState.PENDING : StageState.FINISHED;
- if (complete > 0 || running > 0 || failed > 0) {
- if (!perfLogger.startTimeHasMethod(PerfLogger.SPARK_RUN_STAGE + s)) {
- perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
- }
- if (complete < total) {
- state = StageState.RUNNING;
- } else {
- state = StageState.FINISHED;
- perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SPARK_RUN_STAGE + s);
- completed.add(s);
- }
- }
-
- String attempt = String.valueOf(stage.getAttemptId());
- String stageName = "Stage-" + String.valueOf(stage.getStageId());
- String nameWithProgress = getNameWithProgress(stageName, complete, total);
-
- final int pending = total - complete - running;
- String stageStr = String.format(STAGE_FORMAT,
- nameWithProgress, attempt, state, total, complete, running, pending, failed);
- reportBuffer.append(stageStr);
- if (idx++ != numKey - 1) {
- reportBuffer.append("\n");
- }
- }
- reprintMultiLine(reportBuffer.toString());
- reprintLine(SEPARATOR);
- final float progress = (sumTotal == 0) ? 1.0f : (float) sumComplete / (float) sumTotal;
- String footer = getFooter(numKey, completed.size(), progress, startTime);
- reprintLineWithColorAsBold(footer, Ansi.Color.RED);
- reprintLine(SEPARATOR);
+ inPlaceUpdateFn.render(getProgressMonitor(progressMap));
}
protected void printStatus(Map<SparkStage, SparkStageProgress> progressMap,
@@ -293,84 +214,7 @@ abstract class SparkJobMonitor {
return true;
}
- private void repositionCursor() {
- if (lines > 0) {
- out.print(ansi().cursorUp(lines).toString());
- out.flush();
- lines = 0;
- }
- }
-
- private void reprintLine(String line) {
- InPlaceUpdate.reprintLine(out, line);
- lines++;
- }
-
- private void reprintLineWithColorAsBold(String line, Ansi.Color color) {
- out.print(ansi().eraseLine(Ansi.Erase.ALL).fg(color).bold().a(line).a('\n').boldOff().reset()
- .toString());
- out.flush();
- lines++;
- }
-
- private String getNameWithProgress(String s, int complete, int total) {
- String result = "";
- if (s != null) {
- float percent = total == 0 ? 1.0f : (float) complete / (float) total;
- // lets use the remaining space in column 1 as progress bar
- int spaceRemaining = COLUMN_1_WIDTH - s.length() - 1;
- String trimmedVName = s;
-
- // if the vertex name is longer than column 1 width, trim it down
- if (s.length() > COLUMN_1_WIDTH) {
- trimmedVName = s.substring(0, COLUMN_1_WIDTH - 2);
- result = trimmedVName + "..";
- } else {
- result = trimmedVName + " ";
- }
-
- int toFill = (int) (spaceRemaining * percent);
- for (int i = 0; i < toFill; i++) {
- result += ".";
- }
- }
- return result;
- }
-
- // STAGES: 03/04 [==================>>-----] 86% ELAPSED TIME: 1.71 s
- private String getFooter(int keySize, int completedSize, float progress, long startTime) {
- String verticesSummary = String.format("STAGES: %02d/%02d", completedSize, keySize);
- String progressBar = getInPlaceProgressBar(progress);
- final int progressPercent = (int) (progress * 100);
- String progressStr = "" + progressPercent + "%";
- float et = (float) (System.currentTimeMillis() - startTime) / (float) 1000;
- String elapsedTime = "ELAPSED TIME: " + secondsFormat.format(et) + " s";
- String footer = String.format(FOOTER_FORMAT,
- verticesSummary, progressBar, progressStr, elapsedTime);
- return footer;
- }
-
- // [==================>>-----]
- private String getInPlaceProgressBar(float percent) {
- StringBuilder bar = new StringBuilder("[");
- int remainingChars = progressBarChars - 4;
- int completed = (int) (remainingChars * percent);
- int pending = remainingChars - completed;
- for (int i = 0; i < completed; i++) {
- bar.append("=");
- }
- bar.append(">>");
- for (int i = 0; i < pending; i++) {
- bar.append("-");
- }
- bar.append("]");
- return bar.toString();
- }
-
- private void reprintMultiLine(String line) {
- int numLines = line.split("\r\n|\r|\n").length;
- out.print(ansi().eraseLine(Ansi.Erase.ALL).a(line).a('\n').toString());
- out.flush();
- lines += numLines;
+ private SparkProgressMonitor getProgressMonitor(Map<SparkStage, SparkStageProgress> progressMap) {
+ return new SparkProgressMonitor(progressMap, startTime);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c89cf6d5/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkProgressMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkProgressMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkProgressMonitor.java
new file mode 100644
index 0000000..0c33db0
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/SparkProgressMonitor.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.spark.status;
+
+import org.apache.hadoop.hive.common.log.ProgressMonitor;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+/**
+ * This class defines the various parts of the progress update bar.
+ * The progress bar is displayed in hive-cli and is typically rendered using InPlaceUpdate.
+ */
+class SparkProgressMonitor implements ProgressMonitor {
+
+ private Map<SparkStage, SparkStageProgress> progressMap;
+ private long startTime;
+ private static final int COLUMN_1_WIDTH = 16;
+
+ SparkProgressMonitor(Map<SparkStage, SparkStageProgress> progressMap, long startTime) {
+ this.progressMap = progressMap;
+ this.startTime = startTime;
+ }
+
+ @Override
+ public List<String> headers() {
+ return Arrays.asList("STAGES", "ATTEMPT", "STATUS", "TOTAL", "COMPLETED", "RUNNING", "PENDING", "FAILED", "");
+ }
+
+ @Override
+ public List<List<String>> rows() {
+ List<List<String>> progressRows = new ArrayList<>();
+ SortedSet<SparkStage> keys = new TreeSet<SparkStage>(progressMap.keySet());
+ for (SparkStage stage : keys) {
+ SparkStageProgress progress = progressMap.get(stage);
+ final int complete = progress.getSucceededTaskCount();
+ final int total = progress.getTotalTaskCount();
+ final int running = progress.getRunningTaskCount();
+ final int failed = progress.getFailedTaskCount();
+
+ SparkJobMonitor.StageState state =
+ total > 0 ? SparkJobMonitor.StageState.PENDING : SparkJobMonitor.StageState.FINISHED;
+ if (complete > 0 || running > 0 || failed > 0) {
+ if (complete < total) {
+ state = SparkJobMonitor.StageState.RUNNING;
+ } else {
+ state = SparkJobMonitor.StageState.FINISHED;
+ }
+ }
+ String attempt = String.valueOf(stage.getAttemptId());
+ String stageName = "Stage-" + String.valueOf(stage.getStageId());
+ String nameWithProgress = getNameWithProgress(stageName, complete, total);
+ final int pending = total - complete - running;
+
+ progressRows.add(Arrays
+ .asList(nameWithProgress, attempt, state.toString(), String.valueOf(total), String.valueOf(complete),
+ String.valueOf(running), String.valueOf(pending), String.valueOf(failed), ""));
+ }
+ return progressRows;
+ }
+
+ @Override
+ public String footerSummary() {
+ return String.format("STAGES: %02d/%02d", getCompletedStages(), progressMap.keySet().size());
+ }
+
+ @Override
+ public long startTime() {
+ return startTime;
+ }
+
+ @Override
+ public String executionStatus() {
+ if (getCompletedStages() == progressMap.keySet().size()) {
+ return SparkJobMonitor.StageState.FINISHED.toString();
+ } else {
+ return SparkJobMonitor.StageState.RUNNING.toString();
+ }
+ }
+
+ @Override
+ public double progressedPercentage() {
+
+ SortedSet<SparkStage> keys = new TreeSet<SparkStage>(progressMap.keySet());
+ int sumTotal = 0;
+ int sumComplete = 0;
+ for (SparkStage stage : keys) {
+ SparkStageProgress progress = progressMap.get(stage);
+ final int complete = progress.getSucceededTaskCount();
+ final int total = progress.getTotalTaskCount();
+ sumTotal += total;
+ sumComplete += complete;
+ }
+ double progress = (sumTotal == 0) ? 1.0f : (float) sumComplete / (float) sumTotal;
+ return progress;
+ }
+
+ private int getCompletedStages() {
+ int completed = 0;
+ SortedSet<SparkStage> keys = new TreeSet<SparkStage>(progressMap.keySet());
+ for (SparkStage stage : keys) {
+ SparkStageProgress progress = progressMap.get(stage);
+ final int complete = progress.getSucceededTaskCount();
+ final int total = progress.getTotalTaskCount();
+ if (total > 0 && complete == total) {
+ completed++;
+ }
+ }
+ return completed;
+ }
+
+ private String getNameWithProgress(String s, int complete, int total) {
+
+ if (s == null) {
+ return "";
+ }
+ float percent = total == 0 ? 1.0f : (float) complete / (float) total;
+ // let's use the remaining space in column 1 as a progress bar
+ int spaceRemaining = COLUMN_1_WIDTH - s.length() - 1;
+ String trimmedVName = s;
+
+ // if the vertex name is longer than column 1 width, trim it down
+ if (s.length() > COLUMN_1_WIDTH) {
+ trimmedVName = s.substring(0, COLUMN_1_WIDTH - 2);
+ trimmedVName += "..";
+ } else {
+ trimmedVName += " ";
+ }
+ StringBuilder result = new StringBuilder(trimmedVName);
+ int toFill = (int) (spaceRemaining * percent);
+ for (int i = 0; i < toFill; i++) {
+ result.append(".");
+ }
+ return result.toString();
+ }
+}
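A worked example of the math behind footerSummary(), executionStatus() and progressedPercentage() in the new class (stage counts here are made up): with one stage at 3/4 tasks and another at 0/2, sumComplete/sumTotal = 3/6, so the bar shows 50% while no stage is fully complete yet.

public class ProgressMathExample {
    public static void main(String[] args) {
        int[][] stages = { {3, 4}, {0, 2} };   // {succeeded, total} per stage
        int sumComplete = 0, sumTotal = 0;
        for (int[] s : stages) {
            sumComplete += s[0];
            sumTotal += s[1];
        }
        double progress = (sumTotal == 0) ? 1.0 : (double) sumComplete / sumTotal;
        System.out.println(progress);           // 0.5 -> rendered as 50%
        // Neither stage has complete == total (with total > 0), so the footer would
        // read "STAGES: 00/02" and executionStatus() would still report RUNNING.
    }
}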
[34/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dd512593
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dd512593
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dd512593
Branch: refs/heads/master-txnstats
Commit: dd5125939b5b5ae652e39725cfcf2379e6cb0fea
Parents: 040c078
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Sat Jun 16 11:23:55 2018 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Sat Jun 16 11:23:55 2018 -0700
----------------------------------------------------------------------
.../test/resources/testconfiguration.properties | 3 +
.../hive/llap/io/api/impl/LlapInputFormat.java | 6 +
.../io/decode/GenericColumnVectorProducer.java | 9 +-
.../llap/io/decode/OrcEncodedDataConsumer.java | 40 +-
.../llap/io/encoded/OrcEncodedDataReader.java | 65 +-
.../llap/io/encoded/SerDeEncodedDataReader.java | 3 +
.../io/encoded/VectorDeserializeOrcWriter.java | 49 +-
.../llap/io/metadata/ConsumerFileMetadata.java | 2 +
.../hive/llap/io/metadata/OrcFileMetadata.java | 9 +-
.../hadoop/hive/ql/exec/FetchOperator.java | 4 +-
.../ql/exec/vector/VectorizationContext.java | 7 +-
.../vector/VectorizedInputFormatInterface.java | 1 +
.../ql/exec/vector/VectorizedRowBatchCtx.java | 5 +
.../VectorInBloomFilterColDynamicValue.java | 1 +
.../aggregates/VectorUDAFBloomFilter.java | 1 +
.../ql/exec/vector/udf/VectorUDFAdaptor.java | 8 +
.../hadoop/hive/ql/io/BatchToRowReader.java | 8 +-
.../hadoop/hive/ql/io/NullRowsInputFormat.java | 6 +
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 8 +-
.../hive/ql/io/orc/OrcRawRecordMerger.java | 22 +-
.../apache/hadoop/hive/ql/io/orc/Reader.java | 12 +-
.../hadoop/hive/ql/io/orc/ReaderImpl.java | 9 +-
.../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 25 +-
.../io/orc/VectorizedOrcAcidRowBatchReader.java | 24 +-
.../ql/io/orc/VectorizedOrcInputFormat.java | 8 +-
.../hadoop/hive/ql/io/orc/WriterImpl.java | 23 +-
.../orc/encoded/EncodedTreeReaderFactory.java | 205 +++-
.../ql/io/parquet/MapredParquetInputFormat.java | 6 +
.../hive/ql/optimizer/physical/Vectorizer.java | 115 +-
.../hive/ql/io/orc/TestInputOutputFormat.java | 81 +-
.../hive/ql/io/orc/TestOrcRawRecordMerger.java | 10 +-
.../hive/ql/io/orc/TestVectorizedORCReader.java | 3 +-
.../TestVectorizedOrcAcidRowBatchReader.java | 2 +-
.../queries/clientpositive/explainanalyze_3.q | 2 +-
ql/src/test/queries/clientpositive/llap_acid2.q | 31 +-
.../clientpositive/llap_decimal64_reader.q | 54 +
.../queries/clientpositive/llap_uncompressed.q | 13 +-
ql/src/test/queries/clientpositive/orc_create.q | 4 +-
.../queries/clientpositive/orc_llap_counters.q | 6 +-
.../queries/clientpositive/orc_llap_counters1.q | 7 +-
.../test/queries/clientpositive/orc_merge11.q | 8 +-
ql/src/test/queries/clientpositive/orc_merge5.q | 4 +-
ql/src/test/queries/clientpositive/orc_merge6.q | 4 +-
ql/src/test/queries/clientpositive/orc_merge7.q | 4 +-
.../clientpositive/orc_merge_incompat1.q | 4 +-
.../clientpositive/orc_merge_incompat2.q | 4 +-
.../test/queries/clientpositive/orc_ppd_basic.q | 7 +-
.../clientpositive/orc_ppd_schema_evol_3a.q | 6 +-
.../clientpositive/orc_schema_evolution_float.q | 2 +
.../clientpositive/orc_split_elimination.q | 4 +-
.../schema_evol_orc_nonvec_part_all_primitive.q | 2 +
...evol_orc_nonvec_part_all_primitive_llap_io.q | 2 +
.../schema_evol_orc_vec_part_all_primitive.q | 2 +
...ma_evol_orc_vec_part_all_primitive_llap_io.q | 2 +
.../clientpositive/type_change_test_int.q | 3 +
.../type_change_test_int_vectorized.q | 2 +
.../queries/clientpositive/vector_case_when_1.q | 22 +-
.../queries/clientpositive/vector_decimal_5.q | 3 +-
.../clientpositive/vector_decimal_mapjoin.q | 6 +
.../vectorized_dynamic_semijoin_reduction2.q | 2 +-
.../clientpositive/llap/acid_no_buckets.q.out | 32 +-
.../llap/acid_vectorization_original.q.out | 14 +-
.../llap/enforce_constraint_notnull.q.out | 14 +-
.../results/clientpositive/llap/llap_acid.q.out | 12 +-
.../clientpositive/llap/llap_acid2.q.out | 302 +++--
.../clientpositive/llap/llap_acid_fast.q.out | 12 +-
.../llap/llap_decimal64_reader.q.out | 303 +++++
.../clientpositive/llap/llap_partitioned.q.out | 11 +-
.../results/clientpositive/llap/llap_text.q.out | 1082 ++++++++++++++++++
.../clientpositive/llap/llap_uncompressed.q.out | 283 +++++
.../llap/llap_vector_nohybridgrace.q.out | 16 +-
.../llap/materialized_view_create.q.out | 6 +-
.../materialized_view_create_rewrite_5.q.out | 2 +-
.../llap/materialized_view_describe.q.out | 6 +-
.../results/clientpositive/llap/mergejoin.q.out | 76 +-
.../clientpositive/llap/orc_create.q.out | 12 +-
.../clientpositive/llap/orc_llap_counters.q.out | 86 +-
.../llap/orc_llap_counters1.q.out | 16 +-
.../clientpositive/llap/orc_merge11.q.out | 416 +++----
.../clientpositive/llap/orc_merge5.q.out | 30 +-
.../clientpositive/llap/orc_merge6.q.out | 40 +-
.../clientpositive/llap/orc_merge7.q.out | 76 +-
.../llap/orc_merge_incompat1.q.out | 28 +-
.../llap/orc_merge_incompat2.q.out | 50 +-
.../clientpositive/llap/orc_ppd_basic.q.out | 106 +-
.../llap/orc_ppd_schema_evol_3a.q.out | 88 +-
.../llap/orc_split_elimination.q.out | 8 +-
.../llap/orc_struct_type_vectorization.q.out | 8 +-
.../schema_evol_orc_acidvec_part_llap_io.q.out | 56 +-
.../llap/schema_evol_orc_acidvec_table.q.out | 56 +-
.../schema_evol_orc_acidvec_table_llap_io.q.out | 56 +-
.../llap/schema_evol_orc_vec_part.q.out | 36 +-
.../schema_evol_orc_vec_part_all_complex.q.out | 12 +-
..._evol_orc_vec_part_all_complex_llap_io.q.out | 12 +-
...schema_evol_orc_vec_part_all_primitive.q.out | 15 +-
...vol_orc_vec_part_all_primitive_llap_io.q.out | 15 +-
.../llap/schema_evol_orc_vec_table.q.out | 20 +-
.../schema_evol_orc_vec_table_llap_io.q.out | 20 +-
...evol_text_vec_part_all_complex_llap_io.q.out | 12 +-
...ol_text_vec_part_all_primitive_llap_io.q.out | 19 +-
.../schema_evol_text_vec_part_llap_io.q.out | 27 +-
.../schema_evol_text_vec_table_llap_io.q.out | 42 +-
.../llap/vector_adaptor_usage_mode.q.out | 16 +-
.../llap/vector_aggregate_9.q.out | 12 +-
.../llap/vector_aggregate_without_gby.q.out | 4 +-
.../llap/vector_annotate_stats_select.q.out | 96 +-
.../llap/vector_auto_smb_mapjoin_14.q.out | 8 +-
.../llap/vector_between_columns.q.out | 16 +-
.../clientpositive/llap/vector_between_in.q.out | 48 +-
.../llap/vector_binary_join_groupby.q.out | 27 +-
.../clientpositive/llap/vector_bround.q.out | 4 +-
.../llap/vector_case_when_1.q.out | 433 +++----
.../llap/vector_case_when_2.q.out | 8 +-
.../llap/vector_cast_constant.q.out | 4 +-
.../clientpositive/llap/vector_char_2.q.out | 8 +-
.../clientpositive/llap/vector_char_4.q.out | 4 +-
.../llap/vector_char_mapjoin1.q.out | 24 +-
.../llap/vector_char_simple.q.out | 12 +-
.../llap/vector_char_varchar_1.q.out | 6 +-
.../clientpositive/llap/vector_coalesce.q.out | 24 +-
.../clientpositive/llap/vector_coalesce_2.q.out | 16 +-
.../clientpositive/llap/vector_coalesce_3.q.out | 8 +-
.../clientpositive/llap/vector_coalesce_4.q.out | 4 +-
.../llap/vector_complex_all.q.out | 40 +-
.../llap/vector_complex_join.q.out | 24 +-
.../clientpositive/llap/vector_count.q.out | 8 +-
.../llap/vector_count_distinct.q.out | 4 +-
.../llap/vector_create_struct_table.q.out | 9 +-
.../clientpositive/llap/vector_data_types.q.out | 8 +-
.../clientpositive/llap/vector_date_1.q.out | 28 +-
.../clientpositive/llap/vector_decimal_1.q.out | 144 +--
.../llap/vector_decimal_10_0.q.out | 15 +-
.../clientpositive/llap/vector_decimal_2.q.out | 344 +++---
.../clientpositive/llap/vector_decimal_5.q.out | 154 ++-
.../clientpositive/llap/vector_decimal_6.q.out | 94 +-
.../llap/vector_decimal_aggregate.q.out | 32 +-
.../llap/vector_decimal_cast.q.out | 7 +-
.../llap/vector_decimal_expressions.q.out | 24 +-
.../llap/vector_decimal_mapjoin.q.out | 554 ++-------
.../llap/vector_decimal_math_funcs.q.out | 20 +-
.../llap/vector_decimal_precision.q.out | 7 +-
.../llap/vector_decimal_round.q.out | 62 +-
.../llap/vector_decimal_round_2.q.out | 16 +-
.../llap/vector_decimal_trailing.q.out | 8 +-
.../llap/vector_decimal_udf.q.out | 488 ++++----
.../llap/vector_decimal_udf2.q.out | 38 +-
.../clientpositive/llap/vector_distinct_2.q.out | 4 +-
.../clientpositive/llap/vector_elt.q.out | 8 +-
.../clientpositive/llap/vector_groupby4.q.out | 4 +-
.../clientpositive/llap/vector_groupby6.q.out | 4 +-
.../clientpositive/llap/vector_groupby_3.q.out | 4 +-
.../llap/vector_groupby_cube1.q.out | 15 +-
.../llap/vector_groupby_grouping_id1.q.out | 24 +-
.../llap/vector_groupby_grouping_id2.q.out | 36 +-
.../llap/vector_groupby_grouping_id3.q.out | 8 +-
.../llap/vector_groupby_grouping_sets1.q.out | 28 +-
.../llap/vector_groupby_grouping_sets2.q.out | 12 +-
.../vector_groupby_grouping_sets3_dec.q.out | 30 +-
.../llap/vector_groupby_grouping_sets4.q.out | 12 +-
.../llap/vector_groupby_grouping_sets5.q.out | 12 +-
.../llap/vector_groupby_grouping_sets6.q.out | 8 +-
.../vector_groupby_grouping_sets_grouping.q.out | 60 +-
.../vector_groupby_grouping_sets_limit.q.out | 24 +-
.../llap/vector_groupby_grouping_window.q.out | 4 +-
.../llap/vector_groupby_mapjoin.q.out | 6 +-
.../llap/vector_groupby_reduce.q.out | 16 +-
.../llap/vector_groupby_rollup1.q.out | 12 +-
.../llap/vector_groupby_sort_11.q.out | 12 +-
.../llap/vector_groupby_sort_8.q.out | 3 +-
.../llap/vector_grouping_sets.q.out | 8 +-
.../clientpositive/llap/vector_if_expr.q.out | 4 +-
.../clientpositive/llap/vector_if_expr_2.q.out | 4 +-
.../llap/vector_include_no_sel.q.out | 8 +-
.../clientpositive/llap/vector_inner_join.q.out | 72 +-
.../clientpositive/llap/vector_interval_1.q.out | 32 +-
.../clientpositive/llap/vector_interval_2.q.out | 40 +-
.../llap/vector_interval_arithmetic.q.out | 32 +-
.../llap/vector_interval_mapjoin.q.out | 8 +-
.../clientpositive/llap/vector_join30.q.out | 84 +-
.../llap/vector_left_outer_join.q.out | 12 +-
.../llap/vector_left_outer_join2.q.out | 32 +-
.../llap/vector_leftsemi_mapjoin.q.out | 768 ++++++-------
.../clientpositive/llap/vector_like_2.q.out | 4 +-
.../llap/vector_llap_text_1.q.out | 6 +-
.../clientpositive/llap/vector_map_order.q.out | 3 +-
.../llap/vector_mapjoin_reduce.q.out | 18 +-
.../llap/vector_mr_diff_schema_alias.q.out | 8 +-
.../llap/vector_multi_insert.q.out | 4 +-
.../clientpositive/llap/vector_null_map.q.out | 6 +-
.../llap/vector_null_projection.q.out | 4 +-
.../llap/vector_nullsafe_join.q.out | 112 +-
.../llap/vector_number_compare_projection.q.out | 8 +-
.../clientpositive/llap/vector_nvl.q.out | 16 +-
.../llap/vector_orc_merge_incompat_schema.q.out | 7 +-
.../llap/vector_orc_nested_column_pruning.q.out | 96 +-
.../llap/vector_orc_null_check.q.out | 4 +-
.../clientpositive/llap/vector_order_null.q.out | 33 +-
.../clientpositive/llap/vector_orderby_5.q.out | 4 +-
.../llap/vector_outer_join0.q.out | 16 +-
.../llap/vector_outer_join1.q.out | 28 +-
.../llap/vector_outer_join2.q.out | 12 +-
.../llap/vector_outer_reference_windowed.q.out | 176 ++-
.../llap/vector_partition_diff_num_cols.q.out | 20 +-
.../llap/vector_partitioned_date_time.q.out | 32 +-
.../clientpositive/llap/vector_ptf_1.q.out | 3 +-
.../llap/vector_ptf_part_simple.q.out | 120 +-
.../clientpositive/llap/vector_reduce1.q.out | 4 +-
.../clientpositive/llap/vector_reduce2.q.out | 4 +-
.../clientpositive/llap/vector_reduce3.q.out | 4 +-
.../llap/vector_reduce_groupby_decimal.q.out | 4 +-
.../vector_reduce_groupby_duplicate_cols.q.out | 3 +-
.../llap/vector_retry_failure.q.out | 3 +-
.../llap/vector_reuse_scratchcols.q.out | 8 +-
.../llap/vector_string_concat.q.out | 8 +-
.../llap/vector_string_decimal.q.out | 6 +-
.../clientpositive/llap/vector_struct_in.q.out | 32 +-
.../clientpositive/llap/vector_udf1.q.out | 112 +-
.../clientpositive/llap/vector_udf2.q.out | 12 +-
.../llap/vector_udf_adaptor_1.q.out | 16 +-
.../clientpositive/llap/vector_varchar_4.q.out | 4 +-
.../llap/vector_varchar_mapjoin1.q.out | 24 +-
.../llap/vector_varchar_simple.q.out | 12 +-
.../llap/vector_when_case_null.q.out | 4 +-
.../clientpositive/llap/vector_windowing.q.out | 141 +--
.../llap/vector_windowing_expressions.q.out | 50 +-
.../llap/vector_windowing_gby.q.out | 7 +-
.../llap/vector_windowing_gby2.q.out | 16 +-
.../vector_windowing_multipartitioning.q.out | 42 +-
.../llap/vector_windowing_navfn.q.out | 57 +-
.../llap/vector_windowing_order_null.q.out | 56 +-
.../vector_windowing_range_multiorder.q.out | 77 +-
.../llap/vector_windowing_rank.q.out | 70 +-
.../llap/vector_windowing_streaming.q.out | 17 +-
.../llap/vector_windowing_windowspec.q.out | 77 +-
.../llap/vector_windowing_windowspec4.q.out | 3 +-
.../clientpositive/llap/vectorization_0.q.out | 40 +-
.../clientpositive/llap/vectorization_1.q.out | 4 +-
.../clientpositive/llap/vectorization_10.q.out | 4 +-
.../clientpositive/llap/vectorization_11.q.out | 4 +-
.../clientpositive/llap/vectorization_12.q.out | 4 +-
.../clientpositive/llap/vectorization_13.q.out | 8 +-
.../clientpositive/llap/vectorization_14.q.out | 4 +-
.../clientpositive/llap/vectorization_15.q.out | 4 +-
.../clientpositive/llap/vectorization_16.q.out | 4 +-
.../clientpositive/llap/vectorization_17.q.out | 4 +-
.../clientpositive/llap/vectorization_2.q.out | 4 +-
.../clientpositive/llap/vectorization_3.q.out | 4 +-
.../clientpositive/llap/vectorization_4.q.out | 4 +-
.../clientpositive/llap/vectorization_5.q.out | 4 +-
.../clientpositive/llap/vectorization_6.q.out | 4 +-
.../clientpositive/llap/vectorization_7.q.out | 8 +-
.../clientpositive/llap/vectorization_8.q.out | 8 +-
.../clientpositive/llap/vectorization_9.q.out | 4 +-
.../llap/vectorization_decimal_date.q.out | 4 +-
.../llap/vectorization_div0.q.out | 16 +-
.../llap/vectorization_limit.q.out | 24 +-
.../llap/vectorization_nested_udf.q.out | 4 +-
.../llap/vectorization_part_project.q.out | 4 +-
.../llap/vectorization_pushdown.q.out | 4 +-
.../llap/vectorization_short_regress.q.out | 80 +-
.../clientpositive/llap/vectorized_case.q.out | 68 +-
.../clientpositive/llap/vectorized_casts.q.out | 4 +-
.../llap/vectorized_context.q.out | 12 +-
.../llap/vectorized_date_funcs.q.out | 20 +-
.../llap/vectorized_distinct_gby.q.out | 8 +-
.../vectorized_dynamic_partition_pruning.q.out | 287 ++---
.../vectorized_dynamic_semijoin_reduction.q.out | 52 +-
...vectorized_dynamic_semijoin_reduction2.q.out | 138 ++-
.../llap/vectorized_mapjoin.q.out | 8 +-
.../llap/vectorized_mapjoin3.q.out | 69 +-
.../llap/vectorized_math_funcs.q.out | 4 +-
.../llap/vectorized_nested_mapjoin.q.out | 12 +-
.../clientpositive/llap/vectorized_ptf.q.out | 100 +-
.../llap/vectorized_shufflejoin.q.out | 8 +-
.../llap/vectorized_string_funcs.q.out | 4 +-
.../llap/vectorized_timestamp.q.out | 16 +-
.../llap/vectorized_timestamp_funcs.q.out | 28 +-
.../llap/vectorized_timestamp_ints_casts.q.out | 8 +-
.../test/results/clientpositive/mergejoin.q.out | 8 +-
.../results/clientpositive/orc_file_dump.q.out | 54 +-
.../results/clientpositive/orc_merge11.q.out | 416 +++----
.../results/clientpositive/orc_merge5.q.out | 30 +-
.../results/clientpositive/orc_merge6.q.out | 40 +-
.../clientpositive/orc_merge_incompat1.q.out | 28 +-
.../clientpositive/orc_merge_incompat2.q.out | 50 +-
.../orc_struct_type_vectorization.q.out | 8 +-
.../clientpositive/spark/orc_merge5.q.out | 22 +-
.../clientpositive/spark/orc_merge6.q.out | 32 +-
.../clientpositive/spark/orc_merge7.q.out | 64 +-
.../spark/orc_merge_incompat1.q.out | 22 +-
.../spark/orc_merge_incompat2.q.out | 46 +-
...k_vectorized_dynamic_partition_pruning.q.out | 240 ++--
.../spark/vector_between_in.q.out | 48 +-
.../spark/vector_cast_constant.q.out | 4 +-
.../clientpositive/spark/vector_char_4.q.out | 4 +-
.../spark/vector_count_distinct.q.out | 4 +-
.../spark/vector_data_types.q.out | 8 +-
.../spark/vector_decimal_aggregate.q.out | 8 +-
.../spark/vector_decimal_mapjoin.q.out | 82 +-
.../spark/vector_distinct_2.q.out | 4 +-
.../clientpositive/spark/vector_elt.q.out | 8 +-
.../clientpositive/spark/vector_groupby_3.q.out | 4 +-
.../spark/vector_inner_join.q.out | 72 +-
.../spark/vector_left_outer_join.q.out | 12 +-
.../clientpositive/spark/vector_orderby_5.q.out | 4 +-
.../spark/vector_outer_join0.q.out | 16 +-
.../spark/vector_outer_join1.q.out | 28 +-
.../spark/vector_outer_join2.q.out | 12 +-
.../spark/vector_string_concat.q.out | 8 +-
.../clientpositive/spark/vector_varchar_4.q.out | 4 +-
.../clientpositive/spark/vectorization_0.q.out | 40 +-
.../clientpositive/spark/vectorization_1.q.out | 4 +-
.../clientpositive/spark/vectorization_10.q.out | 4 +-
.../clientpositive/spark/vectorization_11.q.out | 4 +-
.../clientpositive/spark/vectorization_12.q.out | 4 +-
.../clientpositive/spark/vectorization_13.q.out | 8 +-
.../clientpositive/spark/vectorization_14.q.out | 4 +-
.../clientpositive/spark/vectorization_15.q.out | 4 +-
.../clientpositive/spark/vectorization_16.q.out | 4 +-
.../clientpositive/spark/vectorization_17.q.out | 4 +-
.../clientpositive/spark/vectorization_2.q.out | 4 +-
.../clientpositive/spark/vectorization_3.q.out | 4 +-
.../clientpositive/spark/vectorization_4.q.out | 4 +-
.../clientpositive/spark/vectorization_5.q.out | 4 +-
.../clientpositive/spark/vectorization_6.q.out | 4 +-
.../clientpositive/spark/vectorization_9.q.out | 4 +-
.../spark/vectorization_decimal_date.q.out | 4 +-
.../spark/vectorization_div0.q.out | 16 +-
.../spark/vectorization_nested_udf.q.out | 4 +-
.../spark/vectorization_part_project.q.out | 4 +-
.../spark/vectorization_pushdown.q.out | 4 +-
.../spark/vectorization_short_regress.q.out | 80 +-
.../clientpositive/spark/vectorized_case.q.out | 68 +-
.../spark/vectorized_mapjoin.q.out | 8 +-
.../spark/vectorized_math_funcs.q.out | 4 +-
.../spark/vectorized_nested_mapjoin.q.out | 12 +-
.../clientpositive/spark/vectorized_ptf.q.out | 100 +-
.../spark/vectorized_shufflejoin.q.out | 8 +-
.../spark/vectorized_string_funcs.q.out | 4 +-
.../spark/vectorized_timestamp_funcs.q.out | 28 +-
.../tez/acid_vectorization_original_tez.q.out | 38 +-
.../clientpositive/tez/explainanalyze_3.q.out | 4 +-
.../tez/vector_non_string_partition.q.out | 8 +-
.../clientpositive/vector_aggregate_9.q.out | 12 +-
.../vector_aggregate_without_gby.q.out | 4 +-
.../clientpositive/vector_between_columns.q.out | 8 +-
.../vector_binary_join_groupby.q.out | 14 +-
.../results/clientpositive/vector_bround.q.out | 4 +-
.../clientpositive/vector_case_when_1.q.out | 426 +++----
.../clientpositive/vector_case_when_2.q.out | 8 +-
.../clientpositive/vector_cast_constant.q.out | 4 +-
.../results/clientpositive/vector_char_2.q.out | 8 +-
.../results/clientpositive/vector_char_4.q.out | 4 +-
.../clientpositive/vector_char_mapjoin1.q.out | 12 +-
.../clientpositive/vector_char_simple.q.out | 12 +-
.../clientpositive/vector_coalesce.q.out | 24 +-
.../clientpositive/vector_coalesce_2.q.out | 16 +-
.../clientpositive/vector_coalesce_3.q.out | 4 +-
.../clientpositive/vector_coalesce_4.q.out | 4 +-
.../results/clientpositive/vector_count.q.out | 8 +-
.../clientpositive/vector_data_types.q.out | 8 +-
.../results/clientpositive/vector_date_1.q.out | 28 +-
.../clientpositive/vector_decimal_1.q.out | 126 +-
.../clientpositive/vector_decimal_10_0.q.out | 8 +-
.../clientpositive/vector_decimal_5.q.out | 150 ++-
.../clientpositive/vector_decimal_6.q.out | 28 +-
.../vector_decimal_aggregate.q.out | 8 +-
.../clientpositive/vector_decimal_cast.q.out | 4 +-
.../vector_decimal_expressions.q.out | 22 +-
.../clientpositive/vector_decimal_mapjoin.q.out | 74 +-
.../vector_decimal_math_funcs.q.out | 20 +-
.../vector_decimal_precision.q.out | 4 +-
.../clientpositive/vector_decimal_round.q.out | 28 +-
.../clientpositive/vector_decimal_round_2.q.out | 16 +-
.../vector_decimal_trailing.q.out | 8 +-
.../clientpositive/vector_decimal_udf2.q.out | 20 +-
.../vector_delete_orig_table.q.out | 4 +-
.../clientpositive/vector_distinct_2.q.out | 4 +-
.../results/clientpositive/vector_elt.q.out | 8 +-
.../clientpositive/vector_empty_where.q.out | 16 +-
.../clientpositive/vector_groupby4.q.out | 4 +-
.../clientpositive/vector_groupby6.q.out | 4 +-
.../clientpositive/vector_groupby_3.q.out | 4 +-
.../clientpositive/vector_groupby_reduce.q.out | 16 +-
.../clientpositive/vector_grouping_sets.q.out | 8 +-
.../results/clientpositive/vector_if_expr.q.out | 4 +-
.../clientpositive/vector_include_no_sel.q.out | 4 +-
.../clientpositive/vector_interval_1.q.out | 32 +-
.../vector_interval_arithmetic.q.out | 32 +-
.../vector_interval_mapjoin.q.out | 4 +-
.../clientpositive/vector_left_outer_join.q.out | 4 +-
.../vector_left_outer_join2.q.out | 16 +-
.../clientpositive/vector_multi_insert.q.out | 4 +-
.../vector_non_string_partition.q.out | 8 +-
.../clientpositive/vector_null_projection.q.out | 4 +-
.../results/clientpositive/vector_nvl.q.out | 16 +-
.../clientpositive/vector_orderby_5.q.out | 4 +-
.../clientpositive/vector_outer_join0.q.out | 8 +-
.../clientpositive/vector_outer_join1.q.out | 12 +-
.../clientpositive/vector_outer_join2.q.out | 4 +-
.../clientpositive/vector_outer_join3.q.out | 6 +-
.../clientpositive/vector_outer_join4.q.out | 6 +-
.../clientpositive/vector_outer_join6.q.out | 4 +-
.../vector_outer_join_no_keys.q.out | 8 +-
.../results/clientpositive/vector_reduce1.q.out | 4 +-
.../results/clientpositive/vector_reduce2.q.out | 4 +-
.../results/clientpositive/vector_reduce3.q.out | 4 +-
.../vector_reduce_groupby_decimal.q.out | 4 +-
.../clientpositive/vector_string_concat.q.out | 8 +-
.../clientpositive/vector_struct_in.q.out | 32 +-
.../vector_tablesample_rows.q.out | 8 +-
.../results/clientpositive/vector_udf3.q.out | 4 +-
.../clientpositive/vector_varchar_4.q.out | 4 +-
.../vector_varchar_mapjoin1.q.out | 12 +-
.../clientpositive/vector_varchar_simple.q.out | 12 +-
.../clientpositive/vector_when_case_null.q.out | 4 +-
.../clientpositive/vectorization_1.q.out | 4 +-
.../clientpositive/vectorization_10.q.out | 4 +-
.../clientpositive/vectorization_11.q.out | 4 +-
.../clientpositive/vectorization_12.q.out | 4 +-
.../clientpositive/vectorization_13.q.out | 8 +-
.../clientpositive/vectorization_14.q.out | 4 +-
.../clientpositive/vectorization_15.q.out | 4 +-
.../clientpositive/vectorization_16.q.out | 4 +-
.../clientpositive/vectorization_17.q.out | 4 +-
.../clientpositive/vectorization_2.q.out | 4 +-
.../clientpositive/vectorization_3.q.out | 4 +-
.../clientpositive/vectorization_4.q.out | 4 +-
.../clientpositive/vectorization_5.q.out | 4 +-
.../clientpositive/vectorization_6.q.out | 4 +-
.../clientpositive/vectorization_7.q.out | 8 +-
.../clientpositive/vectorization_8.q.out | 8 +-
.../clientpositive/vectorization_9.q.out | 4 +-
.../vectorization_decimal_date.q.out | 4 +-
.../clientpositive/vectorization_limit.q.out | 20 +-
.../vectorization_nested_udf.q.out | 4 +-
.../vectorization_offset_limit.q.out | 8 +-
.../vectorization_part_project.q.out | 4 +-
.../clientpositive/vectorization_pushdown.q.out | 4 +-
.../clientpositive/vectorized_case.q.out | 68 +-
.../clientpositive/vectorized_casts.q.out | 4 +-
.../clientpositive/vectorized_context.q.out | 4 +-
.../clientpositive/vectorized_date_funcs.q.out | 20 +-
.../clientpositive/vectorized_mapjoin.q.out | 4 +-
.../clientpositive/vectorized_mapjoin2.q.out | 4 +-
.../clientpositive/vectorized_mapjoin3.q.out | 12 +-
.../clientpositive/vectorized_math_funcs.q.out | 4 +-
.../vectorized_string_funcs.q.out | 4 +-
.../clientpositive/vectorized_timestamp.q.out | 16 +-
.../vectorized_timestamp_funcs.q.out | 28 +-
.../vectorized_timestamp_ints_casts.q.out | 8 +-
451 files changed, 8616 insertions(+), 6642 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index c7d2285..aeb6211 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -539,6 +539,9 @@ minillaplocal.query.files=\
llap_partitioned.q,\
llap_smb.q,\
llap_vector_nohybridgrace.q,\
+ llap_uncompressed.q,\
+ llap_decimal64_reader.q,\
+ llap_text.q,\
load_data_acid_rename.q,\
load_data_using_job.q,\
load_dyn_part5.q,\
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
index 40f7c83..ac1aca8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hive.llap.io.api.impl;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;
import org.apache.hadoop.hive.ql.io.BatchToRowInputFormat;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -232,4 +233,9 @@ public class LlapInputFormat implements InputFormat<NullWritable, VectorizedRowB
}
return tableScanOperator;
}
+
+ @Override
+ public VectorizedSupport.Support[] getSupportedFeatures() {
+ return new VectorizedSupport.Support[] {VectorizedSupport.Support.DECIMAL_64};
+ }
}
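LlapInputFormat now advertises DECIMAL_64 through the new getSupportedFeatures() hook added to VectorizedInputFormatInterface later in this patch. The sketch below only illustrates how a caller could intersect that declaration with the features enabled via hive.vectorized.input.format.supports.enabled; the helper name and parameters are made up and this is not the actual Vectorizer code.

import java.util.EnumSet;
import java.util.Set;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;

// Illustrative helper: keep only the declared features that are also enabled
// by configuration. A null declaration (see NullRowsInputFormat further down)
// means the format brings no extra vectorization features.
static EnumSet<VectorizedSupport.Support> featuresInUse(
    VectorizedSupport.Support[] declared, Set<String> enabledNames) {
  EnumSet<VectorizedSupport.Support> inUse = EnumSet.noneOf(VectorizedSupport.Support.class);
  if (declared == null) {
    return inUse;
  }
  for (VectorizedSupport.Support s : declared) {
    if (enabledNames.contains(s.name().toLowerCase())) {
      inUse.add(s);    // DECIMAL_64 survives only when "decimal_64" is enabled
    }
  }
  return inUse;
}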
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/GenericColumnVectorProducer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/GenericColumnVectorProducer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/GenericColumnVectorProducer.java
index 7af1b05..32f3bed 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/GenericColumnVectorProducer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/GenericColumnVectorProducer.java
@@ -31,13 +31,11 @@ import org.apache.hadoop.hive.llap.cache.SerDeLowLevelCacheImpl;
import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
import org.apache.hadoop.hive.llap.io.api.impl.ColumnVectorBatch;
import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
-import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer.Includes;
import org.apache.hadoop.hive.llap.io.encoded.SerDeEncodedDataReader;
import org.apache.hadoop.hive.llap.io.metadata.ConsumerFileMetadata;
import org.apache.hadoop.hive.llap.io.metadata.ConsumerStripeMetadata;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
import org.apache.hadoop.hive.ql.io.orc.encoded.IoTrace;
@@ -52,8 +50,8 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hive.common.util.FixedSizedObjectPool;
import org.apache.orc.CompressionKind;
+import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
-import org.apache.orc.OrcUtils;
import org.apache.orc.OrcProto.ColumnEncoding;
import org.apache.orc.OrcProto.RowIndex;
import org.apache.orc.OrcProto.RowIndexEntry;
@@ -289,5 +287,10 @@ public class GenericColumnVectorProducer implements ColumnVectorProducer {
public TypeDescription getSchema() {
return schema;
}
+
+ @Override
+ public OrcFile.Version getFileVersion() {
+ return null;
+ }
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
index feccb87..0d7435c 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.llap.io.metadata.ConsumerStripeMetadata;
import org.apache.hadoop.hive.llap.metrics.LlapDaemonIOMetrics;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
@@ -73,10 +74,11 @@ public class OrcEncodedDataConsumer
private IoTrace trace;
private final Includes includes;
private TypeDescription[] batchSchemas;
+ private boolean useDecimal64ColumnVectors;
public OrcEncodedDataConsumer(
- Consumer<ColumnVectorBatch> consumer, Includes includes, boolean skipCorrupt,
- QueryFragmentCounters counters, LlapDaemonIOMetrics ioMetrics) {
+ Consumer<ColumnVectorBatch> consumer, Includes includes, boolean skipCorrupt,
+ QueryFragmentCounters counters, LlapDaemonIOMetrics ioMetrics) {
super(consumer, includes.getPhysicalColumnIds().size(), ioMetrics);
this.includes = includes;
// TODO: get rid of this
@@ -84,6 +86,10 @@ public class OrcEncodedDataConsumer
this.counters = counters;
}
+ public void setUseDecimal64ColumnVectors(final boolean useDecimal64ColumnVectors) {
+ this.useDecimal64ColumnVectors = useDecimal64ColumnVectors;
+ }
+
public void setFileMetadata(ConsumerFileMetadata f) {
assert fileMetadata == null;
fileMetadata = f;
@@ -153,7 +159,7 @@ public class OrcEncodedDataConsumer
if (cvb.cols[idx] == null) {
// Orc stores rows inside a root struct (hive writes it this way).
// When we populate column vectors we skip over the root struct.
- cvb.cols[idx] = createColumn(batchSchemas[idx], VectorizedRowBatch.DEFAULT_SIZE);
+ cvb.cols[idx] = createColumn(batchSchemas[idx], VectorizedRowBatch.DEFAULT_SIZE, useDecimal64ColumnVectors);
}
trace.logTreeReaderNextVector(idx);
@@ -217,10 +223,10 @@ public class OrcEncodedDataConsumer
TreeReaderFactory.Context context = new TreeReaderFactory.ReaderContext()
.setSchemaEvolution(evolution).skipCorrupt(skipCorrupt)
.writerTimeZone(stripeMetadata.getWriterTimezone())
- ;
+ .fileFormat(fileMetadata == null ? null : fileMetadata.getFileVersion());
this.batchSchemas = includes.getBatchReaderTypes(fileSchema);
StructTreeReader treeReader = EncodedTreeReaderFactory.createRootTreeReader(
- batchSchemas, stripeMetadata.getEncodings(), batch, codec, context);
+ batchSchemas, stripeMetadata.getEncodings(), batch, codec, context, useDecimal64ColumnVectors);
this.columnReaders = treeReader.getChildReaders();
if (LlapIoImpl.LOG.isDebugEnabled()) {
@@ -232,7 +238,7 @@ public class OrcEncodedDataConsumer
positionInStreams(columnReaders, batch.getBatchKey(), stripeMetadata);
}
- private ColumnVector createColumn(TypeDescription type, int batchSize) {
+ private ColumnVector createColumn(TypeDescription type, int batchSize, final boolean useDecimal64ColumnVectors) {
switch (type.getCategory()) {
case BOOLEAN:
case BYTE:
@@ -252,30 +258,34 @@ public class OrcEncodedDataConsumer
case TIMESTAMP:
return new TimestampColumnVector(batchSize);
case DECIMAL:
- return new DecimalColumnVector(batchSize, type.getPrecision(),
- type.getScale());
+ if (useDecimal64ColumnVectors && type.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
+ return new Decimal64ColumnVector(batchSize, type.getPrecision(), type.getScale());
+ } else {
+ return new DecimalColumnVector(batchSize, type.getPrecision(), type.getScale());
+ }
case STRUCT: {
List<TypeDescription> subtypeIdxs = type.getChildren();
ColumnVector[] fieldVector = new ColumnVector[subtypeIdxs.size()];
- for(int i = 0; i < fieldVector.length; ++i) {
- fieldVector[i] = createColumn(subtypeIdxs.get(i), batchSize);
+ for (int i = 0; i < fieldVector.length; ++i) {
+ fieldVector[i] = createColumn(subtypeIdxs.get(i), batchSize, useDecimal64ColumnVectors);
}
return new StructColumnVector(batchSize, fieldVector);
}
case UNION: {
List<TypeDescription> subtypeIdxs = type.getChildren();
ColumnVector[] fieldVector = new ColumnVector[subtypeIdxs.size()];
- for(int i=0; i < fieldVector.length; ++i) {
- fieldVector[i] = createColumn(subtypeIdxs.get(i), batchSize);
+ for (int i = 0; i < fieldVector.length; ++i) {
+ fieldVector[i] = createColumn(subtypeIdxs.get(i), batchSize, useDecimal64ColumnVectors);
}
return new UnionColumnVector(batchSize, fieldVector);
}
case LIST:
- return new ListColumnVector(batchSize, createColumn(type.getChildren().get(0), batchSize));
+ return new ListColumnVector(batchSize, createColumn(type.getChildren().get(0), batchSize,
+ useDecimal64ColumnVectors));
case MAP:
List<TypeDescription> subtypeIdxs = type.getChildren();
- return new MapColumnVector(batchSize, createColumn(subtypeIdxs.get(0), batchSize),
- createColumn(subtypeIdxs.get(1), batchSize));
+ return new MapColumnVector(batchSize, createColumn(subtypeIdxs.get(0), batchSize, useDecimal64ColumnVectors),
+ createColumn(subtypeIdxs.get(1), batchSize, useDecimal64ColumnVectors));
default:
throw new IllegalArgumentException("LLAP does not support " + type.getCategory());
}
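The DECIMAL branch added to createColumn() above chooses between two physical layouts. As a rough illustration of the difference (batch size and values are made up):

import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;

// decimal(10,2) fits in DECIMAL_64: the column keeps unscaled longs,
// so the value 12.34 is stored as 1234L against scale 2.
Decimal64ColumnVector fast = new Decimal64ColumnVector(1024, 10, 2);
fast.vector[0] = 1234L;

// The general path keeps one HiveDecimalWritable per row instead.
DecimalColumnVector general = new DecimalColumnVector(1024, 38, 2);
general.set(0, HiveDecimal.create("12.34"));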
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 2947c16..b76b0de 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -24,37 +24,17 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
-import org.apache.orc.CompressionCodec;
-import org.apache.orc.OrcProto.BloomFilterIndex;
-import org.apache.orc.OrcProto.FileTail;
-import org.apache.orc.OrcProto.RowIndex;
-import org.apache.orc.OrcProto.Stream;
-import org.apache.orc.OrcProto.StripeStatistics;
-import org.apache.orc.TypeDescription;
-import org.apache.orc.impl.BufferChunk;
-import org.apache.orc.impl.DataReaderProperties;
-import org.apache.orc.impl.InStream;
-import org.apache.orc.impl.OrcCodecPool;
-import org.apache.orc.impl.OrcIndex;
-import org.apache.orc.impl.OrcTail;
-import org.apache.orc.impl.ReaderImpl;
-import org.apache.orc.impl.SchemaEvolution;
-import org.apache.orc.impl.WriterImpl;
-import org.apache.tez.common.counters.TezCounters;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.Pool;
import org.apache.hadoop.hive.common.Pool.PoolObjectHelper;
+import org.apache.hadoop.hive.common.io.Allocator;
import org.apache.hadoop.hive.common.io.Allocator.BufferObjectFactory;
import org.apache.hadoop.hive.common.io.DataCache;
-import org.apache.hadoop.hive.common.io.Allocator;
-import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
import org.apache.hadoop.hive.common.io.DiskRange;
import org.apache.hadoop.hive.common.io.DiskRangeList;
+import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -65,38 +45,58 @@ import org.apache.hadoop.hive.llap.cache.BufferUsageManager;
import org.apache.hadoop.hive.llap.cache.LlapDataBuffer;
import org.apache.hadoop.hive.llap.cache.LowLevelCache;
import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
+import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer.Includes;
import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer.SchemaEvolutionFactory;
import org.apache.hadoop.hive.llap.io.decode.OrcEncodedDataConsumer;
-import org.apache.hadoop.hive.llap.io.metadata.OrcFileMetadata;
import org.apache.hadoop.hive.llap.io.metadata.MetadataCache;
import org.apache.hadoop.hive.llap.io.metadata.MetadataCache.LlapBufferOrBuffers;
+import org.apache.hadoop.hive.llap.io.metadata.OrcFileMetadata;
import org.apache.hadoop.hive.llap.io.metadata.OrcStripeMetadata;
import org.apache.hadoop.hive.ql.io.HdfsUtils;
-import org.apache.orc.CompressionKind;
-import org.apache.orc.DataReader;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.OrcFile.ReaderOptions;
-import org.apache.orc.OrcConf;
import org.apache.hadoop.hive.ql.io.orc.OrcSplit;
-import org.apache.hadoop.hive.ql.io.orc.encoded.Reader;
import org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl;
import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedOrcFile;
import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedReader;
import org.apache.hadoop.hive.ql.io.orc.encoded.IoTrace;
import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
+import org.apache.hadoop.hive.ql.io.orc.encoded.Reader;
import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch;
import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.PoolFactory;
-import org.apache.orc.impl.RecordReaderUtils;
-import org.apache.orc.StripeInformation;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hive.common.util.FixedSizedObjectPool;
+import org.apache.orc.CompressionCodec;
+import org.apache.orc.CompressionKind;
+import org.apache.orc.DataReader;
+import org.apache.orc.OrcConf;
import org.apache.orc.OrcProto;
+import org.apache.orc.OrcProto.BloomFilterIndex;
+import org.apache.orc.OrcProto.FileTail;
+import org.apache.orc.OrcProto.RowIndex;
+import org.apache.orc.OrcProto.Stream;
+import org.apache.orc.OrcProto.StripeStatistics;
+import org.apache.orc.StripeInformation;
+import org.apache.orc.TypeDescription;
+import org.apache.orc.impl.BufferChunk;
+import org.apache.orc.impl.DataReaderProperties;
+import org.apache.orc.impl.InStream;
+import org.apache.orc.impl.OrcCodecPool;
+import org.apache.orc.impl.OrcIndex;
+import org.apache.orc.impl.OrcTail;
+import org.apache.orc.impl.ReaderImpl;
+import org.apache.orc.impl.RecordReaderUtils;
+import org.apache.orc.impl.SchemaEvolution;
+import org.apache.orc.impl.WriterImpl;
import org.apache.tez.common.CallableWithNdc;
+import org.apache.tez.common.counters.TezCounters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
@@ -231,6 +231,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
this.jobConf = jobConf;
// TODO: setFileMetadata could just create schema. Called in two places; clean up later.
this.evolution = sef.createSchemaEvolution(fileMetadata.getSchema());
+ consumer.setUseDecimal64ColumnVectors(HiveConf.getVar(jobConf,
+ ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64"));
consumer.setFileMetadata(fileMetadata);
consumer.setSchemaEvolution(evolution);
}
@@ -569,7 +571,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
stripes.add(new ReaderImpl.StripeInformationImpl(stripeProto));
}
return new OrcFileMetadata(
- fileKey, tail.getFooter(), tail.getPostscript(), stats, stripes);
+ fileKey, tail.getFooter(), tail.getPostscript(), stats, stripes,
+ ReaderImpl.getFileVersion(tail.getPostscript().getVersionList()));
} finally {
// We don't need the buffer anymore.
metadataCache.decRefBuffer(tailBuffers);
@@ -586,7 +589,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
}
FileTail ft = orcReader.getFileTail();
return new OrcFileMetadata(fileKey, ft.getFooter(), ft.getPostscript(),
- orcReader.getOrcProtoStripeStatistics(), orcReader.getStripes());
+ orcReader.getOrcProtoStripeStatistics(), orcReader.getStripes(), orcReader.getFileVersion());
}
private OrcProto.StripeFooter buildStripeFooter(
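The equalsIgnoreCase("decimal_64") test above is repeated in several files touched by this patch (SerDeEncodedDataReader, VectorDeserializeOrcWriter, RecordReaderImpl, VectorizedOrcAcidRowBatchReader, WriterImpl). A small helper would keep the string comparison in one place; the class below is only a suggestion, its name is invented and it is not part of the change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

// Hypothetical utility, sketched for illustration only.
final class Decimal64Support {
  private Decimal64Support() {}

  static boolean isEnabled(Configuration conf) {
    return conf != null && HiveConf.getVar(conf,
        ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64");
  }
}

Call sites such as consumer.setUseDecimal64ColumnVectors(Decimal64Support.isEnabled(jobConf)) would then read identically everywhere.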
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
index bed5887..5b54af5 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/SerDeEncodedDataReader.java
@@ -221,6 +221,9 @@ public class SerDeEncodedDataReader extends CallableWithNdc<Void>
this.sourceSerDe = sourceSerDe;
this.reporter = reporter;
this.jobConf = jobConf;
+ final boolean useDecimal64ColumnVectors = HiveConf.getVar(jobConf, ConfVars
+ .HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64");
+ consumer.setUseDecimal64ColumnVectors(useDecimal64ColumnVectors);
this.schema = schema;
this.writerIncludes = OrcInputFormat.genIncludedColumns(schema, columnIds);
SchemaEvolution evolution = new SchemaEvolution(schema, null,
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/VectorDeserializeOrcWriter.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/VectorDeserializeOrcWriter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/VectorDeserializeOrcWriter.java
index de19b1d..ca6d696 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/VectorDeserializeOrcWriter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/VectorDeserializeOrcWriter.java
@@ -20,14 +20,19 @@ package org.apache.hadoop.hive.llap.io.encoded;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
+import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.llap.DebugUtils;
@@ -35,10 +40,12 @@ import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
import org.apache.hadoop.hive.llap.io.encoded.SerDeEncodedDataReader.CacheWriter;
import org.apache.hadoop.hive.llap.io.encoded.SerDeEncodedDataReader.DeserializerOrcWriter;
import org.apache.hadoop.hive.llap.io.encoded.SerDeEncodedDataReader.EncodingWriter;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorDeserializeRow;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.orc.Writer;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -46,14 +53,20 @@ import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.hive.serde2.lazy.fast.LazySimpleDeserializeRead;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.BinaryComparable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.InputFormat;
@@ -113,7 +126,7 @@ class VectorDeserializeOrcWriter extends EncodingWriter implements Runnable {
}
LlapIoImpl.LOG.info("Creating VectorDeserializeOrcWriter for " + path);
return new VectorDeserializeOrcWriter(
- daemonConf, tblProps, sourceOi, sourceIncludes, cacheIncludes, allocSize);
+ jobConf, tblProps, sourceOi, sourceIncludes, cacheIncludes, allocSize);
}
private VectorDeserializeOrcWriter(Configuration conf, Properties tblProps,
@@ -121,12 +134,12 @@ class VectorDeserializeOrcWriter extends EncodingWriter implements Runnable {
int allocSize) throws IOException {
super(sourceOi, allocSize);
// See also: the usage of VectorDeserializeType, for binary. For now, we only want text.
- this.vrbCtx = createVrbCtx(sourceOi);
+ this.vrbCtx = createVrbCtx(sourceOi, tblProps, conf);
this.sourceIncludes = sourceIncludes;
this.cacheIncludes = cacheIncludes;
this.sourceBatch = vrbCtx.createVectorizedRowBatch();
deserializeRead = new LazySimpleDeserializeRead(vrbCtx.getRowColumnTypeInfos(),
- /* useExternalBuffer */ true, createSerdeParams(conf, tblProps));
+ vrbCtx.getRowdataTypePhysicalVariations(),/* useExternalBuffer */ true, createSerdeParams(conf, tblProps));
vectorDeserializeRow = new VectorDeserializeRow<LazySimpleDeserializeRead>(deserializeRead);
int colCount = vrbCtx.getRowColumnTypeInfos().length;
boolean[] includes = null;
@@ -192,13 +205,41 @@ class VectorDeserializeOrcWriter extends EncodingWriter implements Runnable {
this.orcThread.start();
}
- private static VectorizedRowBatchCtx createVrbCtx(StructObjectInspector oi) throws IOException {
+ private static VectorizedRowBatchCtx createVrbCtx(StructObjectInspector oi, final Properties tblProps,
+ final Configuration conf) throws IOException {
+ final boolean useDecimal64ColumnVectors = HiveConf.getVar(conf, ConfVars
+ .HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64");
+ final String serde = tblProps.getProperty(serdeConstants.SERIALIZATION_LIB);
+ final String inputFormat = tblProps.getProperty(hive_metastoreConstants.FILE_INPUT_FORMAT);
+ final boolean isTextFormat = inputFormat != null && inputFormat.equals(TextInputFormat.class.getName()) &&
+ serde != null && serde.equals(LazySimpleSerDe.class.getName());
+ List<DataTypePhysicalVariation> dataTypePhysicalVariations = new ArrayList<>();
+ if (isTextFormat) {
+ StructTypeInfo structTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(oi);
+ int dataColumnCount = structTypeInfo.getAllStructFieldTypeInfos().size();
+ for (int i = 0; i < dataColumnCount; i++) {
+ DataTypePhysicalVariation dataTypePhysicalVariation = DataTypePhysicalVariation.NONE;
+ if (useDecimal64ColumnVectors) {
+ TypeInfo typeInfo = structTypeInfo.getAllStructFieldTypeInfos().get(i);
+ if (typeInfo instanceof DecimalTypeInfo) {
+ DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
+ if (HiveDecimalWritable.isPrecisionDecimal64(decimalTypeInfo.precision())) {
+ dataTypePhysicalVariation = DataTypePhysicalVariation.DECIMAL_64;
+ }
+ }
+ }
+ dataTypePhysicalVariations.add(dataTypePhysicalVariation);
+ }
+ }
VectorizedRowBatchCtx vrbCtx = new VectorizedRowBatchCtx();
try {
vrbCtx.init(oi, new String[0]);
} catch (HiveException e) {
throw new IOException(e);
}
+ if (!dataTypePhysicalVariations.isEmpty()) {
+ vrbCtx.setRowDataTypePhysicalVariations(dataTypePhysicalVariations.toArray(new DataTypePhysicalVariation[0]));
+ }
return vrbCtx;
}
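createVrbCtx() above decides, column by column, whether a text-format decimal may use the DECIMAL_64 variation, and precision is the deciding factor. A minimal sketch of that eligibility test, assuming the 18-digit limit expressed by TypeDescription.MAX_DECIMAL64_PRECISION (the patch itself goes through HiveDecimalWritable.isPrecisionDecimal64(), which should amount to the same limit):

import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.orc.TypeDescription;

// Illustration only: a decimal column can ride in a 64-bit long when its
// precision is at most TypeDescription.MAX_DECIMAL64_PRECISION digits.
static DataTypePhysicalVariation variationFor(TypeInfo typeInfo, boolean decimal64Enabled) {
  if (decimal64Enabled && typeInfo instanceof DecimalTypeInfo
      && ((DecimalTypeInfo) typeInfo).precision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
    return DataTypePhysicalVariation.DECIMAL_64;
  }
  return DataTypePhysicalVariation.NONE;
}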
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java
index 89ad4aa..d6b16ef 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/ConsumerFileMetadata.java
@@ -21,6 +21,7 @@ import java.util.List;
import org.apache.orc.CompressionKind;
import org.apache.orc.FileFormatException;
+import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto.Type;
import org.apache.orc.TypeDescription;
@@ -29,4 +30,5 @@ public interface ConsumerFileMetadata {
CompressionKind getCompressionKind();
List<Type> getTypes();
TypeDescription getSchema() throws FileFormatException;
+ OrcFile.Version getFileVersion();
}
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
index 5cd6f9f..5eb713c 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
@@ -50,9 +50,10 @@ public final class OrcFileMetadata implements FileMetadata, ConsumerFileMetadata
private final long contentLength;
private final long numberOfRows;
private final boolean isOriginalFormat;
+ private final OrcFile.Version fileVersion;
public OrcFileMetadata(Object fileKey, OrcProto.Footer footer, OrcProto.PostScript ps,
- List<StripeStatistics> stats, List<StripeInformation> stripes) {
+ List<StripeStatistics> stats, List<StripeInformation> stripes, final OrcFile.Version fileVersion) {
this.stripeStats = stats;
this.compressionKind = CompressionKind.valueOf(ps.getCompression().name());
this.compressionBufferSize = (int)ps.getCompressionBlockSize();
@@ -67,6 +68,7 @@ public final class OrcFileMetadata implements FileMetadata, ConsumerFileMetadata
this.numberOfRows = footer.getNumberOfRows();
this.fileStats = footer.getStatisticsList();
this.fileKey = fileKey;
+ this.fileVersion = fileVersion;
}
// FileMetadata
@@ -163,4 +165,9 @@ public final class OrcFileMetadata implements FileMetadata, ConsumerFileMetadata
public TypeDescription getSchema() throws FileFormatException {
return OrcUtils.convertTypeFromProtobuf(this.types, 0);
}
+
+ @Override
+ public OrcFile.Version getFileVersion() {
+ return fileVersion;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index 2246901..183fae5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -214,8 +214,8 @@ public class FetchOperator implements Serializable {
private static final Map<String, InputFormat> inputFormats = new HashMap<String, InputFormat>();
@SuppressWarnings("unchecked")
- static InputFormat getInputFormatFromCache(
- Class<? extends InputFormat> inputFormatClass, JobConf conf) throws IOException {
+ public static InputFormat getInputFormatFromCache(
+ Class<? extends InputFormat> inputFormatClass, Configuration conf) throws IOException {
if (Configurable.class.isAssignableFrom(inputFormatClass) ||
JobConfigurable.class.isAssignableFrom(inputFormatClass)) {
return ReflectionUtil.newInstance(inputFormatClass, conf);
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 9ddb136..84a0a3a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -584,8 +584,8 @@ public class VectorizationContext {
// Re-use an existing, available column of the same required type.
if (usedOutputColumns.contains(i) ||
- !(scratchVectorTypeNames)[i].equalsIgnoreCase(columnType) &&
- scratchDataTypePhysicalVariations[i] == dataTypePhysicalVariation) {
+ !(scratchVectorTypeNames[i].equalsIgnoreCase(columnType) &&
+ scratchDataTypePhysicalVariations[i] == dataTypePhysicalVariation)) {
continue;
}
//Use i
@@ -874,6 +874,7 @@ public class VectorizationContext {
LOG.debug("Input Expression = " + exprDesc.toString()
+ ", Vectorized Expression = " + ve.toString());
}
+
return ve;
}
@@ -1965,7 +1966,7 @@ public class VectorizationContext {
return cleaned;
}
- private VectorExpression instantiateExpression(Class<?> vclass, TypeInfo returnTypeInfo,
+ public VectorExpression instantiateExpression(Class<?> vclass, TypeInfo returnTypeInfo,
DataTypePhysicalVariation returnDataTypePhysicalVariation, Object...args)
throws HiveException {
VectorExpression ve = null;
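The scratch-column reuse condition fixed earlier in this section had a precedence problem: the negation applied only to the type comparison, so a scratch column whose type and physical variation both differed from the requested ones could slip past the guard and be reused. The added parentheses negate the whole conjunction, so a column is reused only when its type and its DataTypePhysicalVariation both match. In miniature (placeholder booleans, not Hive code):

// With typeMatches == false and variationMatches == false:
boolean used = false, typeMatches = false, variationMatches = false;
boolean skipBefore = used || (!typeMatches && variationMatches);   // false: wrong column reused
boolean skipAfter  = used || !(typeMatches && variationMatches);   // true: correctly skipped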
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedInputFormatInterface.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedInputFormatInterface.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedInputFormatInterface.java
index e74b185..8ee59e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedInputFormatInterface.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedInputFormatInterface.java
@@ -24,4 +24,5 @@ package org.apache.hadoop.hive.ql.exec.vector;
*/
public interface VectorizedInputFormatInterface {
+ VectorizedSupport.Support[] getSupportedFeatures();
}
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
index 6588385..ffbfb6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
@@ -163,6 +163,11 @@ public class VectorizedRowBatchCtx {
return rowDataTypePhysicalVariations;
}
+ public void setRowDataTypePhysicalVariations(
+ final DataTypePhysicalVariation[] rowDataTypePhysicalVariations) {
+ this.rowDataTypePhysicalVariations = rowDataTypePhysicalVariations;
+ }
+
public int[] getDataColumnNums() {
return dataColumnNums;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java
index d8a3cac..8bf990a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java
@@ -82,6 +82,7 @@ public class VectorInBloomFilterColDynamicValue extends VectorExpression {
// Instantiate BloomFilterCheck based on input column type
switch (colVectorType) {
case LONG:
+ case DECIMAL_64:
bfCheck = new LongBloomFilterCheck();
break;
case DOUBLE:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
index 18bacc5..fdb067f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java
@@ -107,6 +107,7 @@ public class VectorUDAFBloomFilter extends VectorAggregateExpression {
}
switch (colVectorType) {
case LONG:
+ case DECIMAL_64:
valueProcessor = new ValueProcessorLong();
break;
case DOUBLE:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
index 5b2cb4c..e011657 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/udf/VectorUDFAdaptor.java
@@ -235,4 +235,12 @@ public class VectorUDFAdaptor extends VectorExpression {
public VectorExpressionDescriptor.Descriptor getDescriptor() {
return (new VectorExpressionDescriptor.Builder()).build();
}
+
+ public VectorUDFArgDesc[] getArgDescs() {
+ return argDescs;
+ }
+
+ public void setArgDescs(final VectorUDFArgDesc[] argDescs) {
+ this.argDescs = argDescs;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/BatchToRowReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/BatchToRowReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/BatchToRowReader.java
index 2b005c4..c88ee99 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/BatchToRowReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/BatchToRowReader.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hive.llap.DebugUtils;
import java.util.Arrays;
+import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -416,7 +417,12 @@ public abstract class BatchToRowReader<StructType, UnionType>
} else {
result = (HiveDecimalWritable) previous;
}
- result.set(((DecimalColumnVector) vector).vector[row]);
+ if (vector instanceof Decimal64ColumnVector) {
+ long value = ((Decimal64ColumnVector) vector).vector[row];
+ result.deserialize64(value, ((Decimal64ColumnVector) vector).scale);
+ } else {
+ result.set(((DecimalColumnVector) vector).vector[row]);
+ }
return result;
} else {
return null;
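The Decimal64ColumnVector branch above leans on HiveDecimalWritable.deserialize64() to rebuild the decimal from its unscaled long and the column's scale. A small round-trip illustration with made-up values:

import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

// 1234L read back with scale 2 yields the decimal value 12.34.
HiveDecimalWritable w = new HiveDecimalWritable();
w.deserialize64(1234L, 2);
// w.getHiveDecimal() now holds 12.34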
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java
index e632d43..6434414 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
import java.io.IOException;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.io.NullWritable;
@@ -50,6 +51,11 @@ public class NullRowsInputFormat implements InputFormat<NullWritable, NullWritab
static final int MAX_ROW = 100; // to prevent infinite loop
static final Logger LOG = LoggerFactory.getLogger(NullRowsRecordReader.class.getName());
+ @Override
+ public VectorizedSupport.Support[] getSupportedFeatures() {
+ return null;
+ }
+
public static class DummyInputSplit extends FileSplit {
@SuppressWarnings("unused") // Serialization ctor.
private DummyInputSplit() {
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index b6f92e3..3c11847 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;
import org.apache.hadoop.hive.ql.io.AcidInputFormat;
import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -161,6 +162,11 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
SelfDescribingInputFormatInterface, AcidInputFormat<NullWritable, OrcStruct>,
CombineHiveInputFormat.AvoidSplitCombination, BatchToRowInputFormat {
+ @Override
+ public VectorizedSupport.Support[] getSupportedFeatures() {
+ return new VectorizedSupport.Support[] {VectorizedSupport.Support.DECIMAL_64};
+ }
+
static enum SplitStrategyKind {
HYBRID,
BI,
@@ -328,7 +334,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
List<OrcProto.Type> types = OrcUtils.getOrcTypes(schema);
options.include(genIncludedColumns(schema, conf));
setSearchArgument(options, types, conf, isOriginal);
- return file.rowsOptions(options);
+ return file.rowsOptions(options, conf);
}
public static boolean isOriginal(Reader file) {
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
index 9d954ca..6571a24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
@@ -23,6 +23,7 @@ import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -231,16 +232,17 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
* @param maxKey only return keys less than or equal to maxKey if it is
* non-null
* @param options options to provide to read the rows.
+ * @param conf
* @throws IOException
*/
@VisibleForTesting
ReaderPairAcid(ReaderKey key, Reader reader,
- RecordIdentifier minKey, RecordIdentifier maxKey,
- ReaderImpl.Options options) throws IOException {
+ RecordIdentifier minKey, RecordIdentifier maxKey,
+ ReaderImpl.Options options, final Configuration conf) throws IOException {
this.reader = reader;
this.key = key;
// TODO use stripe statistics to jump over stripes
- recordReader = reader.rowsOptions(options);
+ recordReader = reader.rowsOptions(options, conf);
this.minKey = minKey;
this.maxKey = maxKey;
// advance the reader until we reach the minimum key
@@ -440,7 +442,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
RecordIdentifier newMinKey = minKey;
RecordIdentifier newMaxKey = maxKey;
- recordReader = reader.rowsOptions(options);
+ recordReader = reader.rowsOptions(options, conf);
/**
* Logically each bucket consists of 0000_0, 0000_0_copy_1... 0000_0_copy_N. etc We don't
* know N a priori so if this is true, then the current split is from 0000_0_copy_N file.
@@ -589,7 +591,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
throw new IllegalStateException("No 'original' files found for bucketId=" + this.bucketId +
" in " + mergerOptions.getRootPath());
}
- recordReader = getReader().rowsOptions(options);
+ recordReader = getReader().rowsOptions(options, conf);
next(nextRecord());//load 1st row
}
@Override public RecordReader getRecordReader() {
@@ -623,7 +625,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
nextRecord = null;
return;
}
- recordReader = reader.rowsOptions(options);
+ recordReader = reader.rowsOptions(options, conf);
}
}
}
@@ -1043,7 +1045,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
//required (on Tez) that base_x/ doesn't have a file for 'bucket'
reader = OrcFile.createReader(bucketPath, OrcFile.readerOptions(conf));
pair = new ReaderPairAcid(baseKey, reader, keyInterval.getMinKey(), keyInterval.getMaxKey(),
- eventOptions);
+ eventOptions, conf);
}
else {
pair = new EmptyReaderPair();
@@ -1053,7 +1055,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
else {
assert reader != null : "no reader? " + mergerOptions.getRootPath();
pair = new ReaderPairAcid(baseKey, reader, keyInterval.getMinKey(), keyInterval.getMaxKey(),
- eventOptions);
+ eventOptions, conf);
}
}
minKey = pair.getMinKey();
@@ -1113,7 +1115,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
//HIVE-17320: we should compute a SARG to push down min/max key to delete_delta
Reader deltaReader = OrcFile.createReader(deltaFile, OrcFile.readerOptions(conf));
ReaderPair deltaPair = new ReaderPairAcid(key, deltaReader, minKey, maxKey,
- deltaEventOptions);
+ deltaEventOptions, conf);
if (deltaPair.nextRecord() != null) {
ensurePutReader(key, deltaPair);
key = new ReaderKey();
@@ -1128,7 +1130,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
assert length >= 0;
Reader deltaReader = OrcFile.createReader(deltaFile, OrcFile.readerOptions(conf).maxLength(length));
//must get statementId from file name since Acid 1.0 doesn't write it into bucketProperty
- ReaderPairAcid deltaPair = new ReaderPairAcid(key, deltaReader, minKey, maxKey, deltaEventOptions);
+ ReaderPairAcid deltaPair = new ReaderPairAcid(key, deltaReader, minKey, maxKey, deltaEventOptions, conf);
if (deltaPair.nextRecord() != null) {
ensurePutReader(key, deltaPair);
key = new ReaderKey();
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
index 7485e60..8fd9b90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.io.orc;
import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -55,7 +56,16 @@ public interface Reader extends org.apache.orc.Reader {
* @throws IOException
*/
RecordReader rowsOptions(Options options) throws IOException;
-
+
+ /**
+ * Create a RecordReader that reads everything with the given options.
+ * @param options the options to use
+ * @param conf conf object
+ * @return a new RecordReader
+ * @throws IOException
+ */
+ RecordReader rowsOptions(Options options, Configuration conf) throws IOException;
+
/**
* Create a RecordReader that will scan the entire file.
* This is a legacy method and rowsOptions is preferred.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index 1a6db1f..171b02b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.io.orc;
import java.io.IOException;
import java.nio.ByteBuffer;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -73,11 +74,17 @@ public class ReaderImpl extends org.apache.orc.impl.ReaderImpl
@Override
public RecordReader rowsOptions(Options options) throws IOException {
+ return rowsOptions(options, null);
+ }
+
+ @Override
+ public RecordReader rowsOptions(Options options, Configuration conf) throws IOException {
LOG.info("Reading ORC rows from " + path + " with " + options);
- return new RecordReaderImpl(this, options);
+ return new RecordReaderImpl(this, options, conf);
}
+
@Override
public RecordReader rows(boolean[] include) throws IOException {
return rowsOptions(new Options().include(include));
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index 5b001a0..c6fe4fc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -23,8 +23,11 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
@@ -48,6 +51,7 @@ import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
+import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -60,9 +64,15 @@ public class RecordReaderImpl extends org.apache.orc.impl.RecordReaderImpl
private long baseRow;
protected RecordReaderImpl(ReaderImpl fileReader,
- Reader.Options options) throws IOException {
+ Reader.Options options, final Configuration conf) throws IOException {
super(fileReader, options);
- batch = this.schema.createRowBatch();
+ final boolean useDecimal64ColumnVectors = conf != null && HiveConf.getVar(conf,
+ HiveConf.ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64");
+ if (useDecimal64ColumnVectors){
+ batch = this.schema.createRowBatchV2();
+ } else {
+ batch = this.schema.createRowBatch();
+ }
rowInBatch = 0;
}
@@ -80,8 +90,8 @@ public class RecordReaderImpl extends org.apache.orc.impl.RecordReaderImpl
return true;
}
- public VectorizedRowBatch createRowBatch() {
- return this.schema.createRowBatch();
+ public VectorizedRowBatch createRowBatch(boolean useDecimal64) {
+ return useDecimal64 ? this.schema.createRowBatchV2() : this.schema.createRowBatch();
}
@Override
@@ -393,7 +403,12 @@ public class RecordReaderImpl extends org.apache.orc.impl.RecordReaderImpl
} else {
result = (HiveDecimalWritable) previous;
}
- result.set(((DecimalColumnVector) vector).vector[row]);
+ if (vector instanceof Decimal64ColumnVector) {
+ long value = ((Decimal64ColumnVector) vector).vector[row];
+ result.deserialize64(value, ((Decimal64ColumnVector) vector).scale);
+ } else {
+ result.set(((DecimalColumnVector) vector).vector[row]);
+ }
return result;
} else {
return null;
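The constructor change above is where the row-level ORC reader picks its batch flavour: createRowBatchV2() yields Decimal64ColumnVector for eligible decimal columns, while createRowBatch() sticks with DecimalColumnVector. A hedged sketch against a one-column schema (the schema string is made up):

import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;

TypeDescription schema = TypeDescription.fromString("struct<d:decimal(10,2)>");
VectorizedRowBatch v2 = schema.createRowBatchV2();  // d is a Decimal64ColumnVector
VectorizedRowBatch v1 = schema.createRowBatch();    // d is a DecimalColumnVector
assert v2.cols[0] instanceof Decimal64ColumnVector;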
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index 66ffcae..1a91d4a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -112,7 +112,7 @@ public class VectorizedOrcAcidRowBatchReader
final Reader reader = OrcInputFormat.createOrcReaderForSplit(conf, (OrcSplit) inputSplit);
// Careful with the range here now, we do not want to read the whole base file like deltas.
- innerReader = reader.rowsOptions(readerOptions.range(offset, length));
+ innerReader = reader.rowsOptions(readerOptions.range(offset, length), conf);
baseReader = new org.apache.hadoop.mapred.RecordReader<NullWritable, VectorizedRowBatch>() {
@Override
@@ -145,7 +145,13 @@ public class VectorizedOrcAcidRowBatchReader
return innerReader.getProgress();
}
};
- this.vectorizedRowBatchBase = ((RecordReaderImpl) innerReader).createRowBatch();
+ final boolean useDecimal64ColumnVectors = HiveConf
+ .getVar(conf, ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64");
+ if (useDecimal64ColumnVectors) {
+ this.vectorizedRowBatchBase = ((RecordReaderImpl) innerReader).createRowBatch(true);
+ } else {
+ this.vectorizedRowBatchBase = ((RecordReaderImpl) innerReader).createRowBatch(false);
+ }
}
/**
@@ -864,11 +870,17 @@ public class VectorizedOrcAcidRowBatchReader
private final Reader reader;
DeleteReaderValue(Reader deleteDeltaReader, Reader.Options readerOptions, int bucket,
- ValidWriteIdList validWriteIdList, boolean isBucketedTable) throws IOException {
+ ValidWriteIdList validWriteIdList, boolean isBucketedTable, final JobConf conf) throws IOException {
this.reader = deleteDeltaReader;
- this.recordReader = deleteDeltaReader.rowsOptions(readerOptions);
+ this.recordReader = deleteDeltaReader.rowsOptions(readerOptions, conf);
this.bucketForSplit = bucket;
- this.batch = deleteDeltaReader.getSchema().createRowBatch();
+ final boolean useDecimal64ColumnVector = HiveConf.getVar(conf, ConfVars
+ .HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED).equalsIgnoreCase("decimal_64");
+ if (useDecimal64ColumnVector) {
+ this.batch = deleteDeltaReader.getSchema().createRowBatchV2();
+ } else {
+ this.batch = deleteDeltaReader.getSchema().createRowBatch();
+ }
if (!recordReader.nextBatch(batch)) { // Read the first batch.
this.batch = null; // Oh! the first batch itself was null. Close the reader.
}
@@ -1067,7 +1079,7 @@ public class VectorizedOrcAcidRowBatchReader
throw new DeleteEventsOverflowMemoryException();
}
DeleteReaderValue deleteReaderValue = new DeleteReaderValue(deleteDeltaReader,
- readerOptions, bucket, validWriteIdList, isBucketedTable);
+ readerOptions, bucket, validWriteIdList, isBucketedTable, conf);
DeleteRecordKey deleteRecordKey = new DeleteRecordKey();
if (deleteReaderValue.next(deleteRecordKey)) {
sortMerger.put(deleteRecordKey, deleteReaderValue);
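A minor readability note on the vectorizedRowBatchBase hunk earlier in this file: the if/else around createRowBatch(true) and createRowBatch(false) is equivalent to passing the flag straight through, for example:

this.vectorizedRowBatchBase = ((RecordReaderImpl) innerReader).createRowBatch(useDecimal64ColumnVectors);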
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
index c581bba..892fcc0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.InputFormatChecker;
import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface;
@@ -99,7 +100,7 @@ public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, Vect
options.include(OrcInputFormat.genIncludedColumns(schema, conf));
OrcInputFormat.setSearchArgument(options, types, conf, true);
- this.reader = file.rowsOptions(options);
+ this.reader = file.rowsOptions(options, conf);
int partitionColumnCount = rbCtx.getPartitionColumnCount();
if (partitionColumnCount > 0) {
@@ -204,4 +205,9 @@ public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, Vect
}
return true;
}
+
+ @Override
+ public VectorizedSupport.Support[] getSupportedFeatures() {
+ return new VectorizedSupport.Support[] {VectorizedSupport.Support.DECIMAL_64};
+ }
}
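The new getSupportedFeatures() override is what flips the "inputFormatFeatureSupport: []" lines to "[DECIMAL_64]" in the EXPLAIN output updated throughout the .q.out files below: inputFormatFeatureSupport reflects what the input format advertises, and featureSupportInUse appears to be the portion of that set actually enabled by configuration. As a hedged sketch of the same idea (assuming VectorizedInputFormatInterface declares only getSupportedFeatures(), as the hunk suggests; the class name here is hypothetical), a custom vectorized input format would advertise the feature the same way:

import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;

public class MyVectorizedInputFormat implements VectorizedInputFormatInterface {
  @Override
  public VectorizedSupport.Support[] getSupportedFeatures() {
    // Advertise that batches produced by this format may carry Decimal64ColumnVector
    // columns, so DECIMAL_64 can show up in the plan's feature-support sets.
    return new VectorizedSupport.Support[] { VectorizedSupport.Support.DECIMAL_64 };
  }
}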
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
index 71682af..91a01e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
@@ -24,6 +24,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -63,6 +65,7 @@ import org.apache.hadoop.io.Text;
import com.google.common.annotations.VisibleForTesting;
import org.apache.orc.PhysicalWriter;
+import org.apache.orc.TypeDescription;
/**
* An ORC file writer. The file is divided into stripes, which is the natural
@@ -93,7 +96,15 @@ public class WriterImpl extends org.apache.orc.impl.WriterImpl implements Writer
OrcFile.WriterOptions opts) throws IOException {
super(fs, path, opts);
this.inspector = opts.getInspector();
- this.internalBatch = opts.getSchema().createRowBatch(opts.getBatchSize());
+ boolean useDecimal64ColumnVectors = opts.getConfiguration() != null &&
+ HiveConf.getVar(opts.getConfiguration(), HiveConf.ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED)
+ .equalsIgnoreCase("decimal_64");
+ if (useDecimal64ColumnVectors) {
+ this.internalBatch = opts.getSchema().createRowBatch(TypeDescription.RowBatchVersion.USE_DECIMAL64,
+ opts.getBatchSize());
+ } else {
+ this.internalBatch = opts.getSchema().createRowBatch(opts.getBatchSize());
+ }
this.fields = initializeFieldsFromOi(inspector);
}
@@ -207,9 +218,15 @@ public class WriterImpl extends org.apache.orc.impl.WriterImpl implements Writer
break;
}
case DECIMAL: {
- DecimalColumnVector vector = (DecimalColumnVector) column;
- vector.set(rowId, ((HiveDecimalObjectInspector) inspector)
+ if (column instanceof Decimal64ColumnVector) {
+ Decimal64ColumnVector vector = (Decimal64ColumnVector) column;
+ vector.set(rowId, ((HiveDecimalObjectInspector) inspector)
+ .getPrimitiveWritableObject(obj));
+ } else {
+ DecimalColumnVector vector = (DecimalColumnVector) column;
+ vector.set(rowId, ((HiveDecimalObjectInspector) inspector)
.getPrimitiveWritableObject(obj));
+ }
break;
}
}
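The DECIMAL branch above now has to handle both vector flavors because the internal batch may have been created with either createRowBatch() or the DECIMAL_64 variant. A hedged standalone sketch of that dispatch (illustration only; assumes the Hive storage-api classes are on the classpath, and the helper class and method names are made up):

import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

final class DecimalColumnWriterSketch {
  private DecimalColumnWriterSketch() {}

  // Write one decimal value into whichever decimal vector flavor the batch carries,
  // mirroring the instanceof dispatch added in the DECIMAL case above.
  static void setDecimal(ColumnVector column, int rowId, HiveDecimalWritable value) {
    if (column instanceof Decimal64ColumnVector) {
      // Decimal64ColumnVector stores values as scaled longs; set() converts the writable.
      ((Decimal64ColumnVector) column).set(rowId, value);
    } else {
      ((DecimalColumnVector) column).set(rowId, value);
    }
  }
}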
[19/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
index f1db9af..d739408 100644
--- a/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_leftsemi_mapjoin.q.out
@@ -3372,8 +3372,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3402,8 +3402,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3485,8 +3485,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3515,8 +3515,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3600,8 +3600,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3630,8 +3630,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3710,8 +3710,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3740,8 +3740,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3828,8 +3828,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3858,8 +3858,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3941,8 +3941,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3971,8 +3971,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4054,8 +4054,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4084,8 +4084,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4164,8 +4164,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4191,8 +4191,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4273,8 +4273,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4303,8 +4303,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4399,8 +4399,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4429,8 +4429,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4513,8 +4513,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4535,8 +4535,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4565,8 +4565,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4658,8 +4658,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4688,8 +4688,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4779,8 +4779,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4809,8 +4809,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4839,8 +4839,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4929,8 +4929,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4948,8 +4948,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4975,8 +4975,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5072,8 +5072,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5091,8 +5091,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5118,8 +5118,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5219,8 +5219,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5246,8 +5246,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5265,8 +5265,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5366,8 +5366,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5393,8 +5393,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5412,8 +5412,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5515,8 +5515,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5542,8 +5542,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5561,8 +5561,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5688,8 +5688,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5718,8 +5718,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5737,8 +5737,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5852,8 +5852,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5882,8 +5882,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5972,8 +5972,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6035,8 +6035,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6174,8 +6174,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6237,8 +6237,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6378,8 +6378,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6441,8 +6441,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6582,8 +6582,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6645,8 +6645,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6789,8 +6789,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6852,8 +6852,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6992,8 +6992,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7059,8 +7059,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7199,8 +7199,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7266,8 +7266,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7403,8 +7403,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7462,8 +7462,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7600,8 +7600,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7663,8 +7663,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7815,8 +7815,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -7879,8 +7879,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8028,8 +8028,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8072,8 +8072,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8135,8 +8135,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8284,8 +8284,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8347,8 +8347,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8497,8 +8497,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8560,8 +8560,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8623,8 +8623,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8768,8 +8768,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8804,8 +8804,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8860,8 +8860,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -8998,8 +8998,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9034,8 +9034,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9090,8 +9090,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9248,8 +9248,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9304,8 +9304,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9340,8 +9340,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9498,8 +9498,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9554,8 +9554,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9590,8 +9590,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9750,8 +9750,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9806,8 +9806,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9842,8 +9842,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10053,8 +10053,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10116,8 +10116,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10152,8 +10152,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10329,8 +10329,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10392,8 +10392,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10492,8 +10492,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10555,8 +10555,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10695,8 +10695,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10758,8 +10758,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10900,8 +10900,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10963,8 +10963,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11105,8 +11105,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11168,8 +11168,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11313,8 +11313,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11376,8 +11376,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11516,8 +11516,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11584,8 +11584,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11724,8 +11724,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11792,8 +11792,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11929,8 +11929,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11989,8 +11989,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12128,8 +12128,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12191,8 +12191,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -12344,8 +12344,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12408,8 +12408,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -12557,8 +12557,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -12601,8 +12601,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12664,8 +12664,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -12814,8 +12814,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12877,8 +12877,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13027,8 +13027,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13090,8 +13090,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13153,8 +13153,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13298,8 +13298,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13334,8 +13334,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -13390,8 +13390,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13528,8 +13528,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -13564,8 +13564,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -13620,8 +13620,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13778,8 +13778,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -13834,8 +13834,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -13870,8 +13870,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -14028,8 +14028,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -14084,8 +14084,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -14120,8 +14120,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -14280,8 +14280,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -14336,8 +14336,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -14372,8 +14372,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -14585,8 +14585,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -14648,8 +14648,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -14684,8 +14684,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -14862,8 +14862,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -14925,8 +14925,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -15025,8 +15025,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -15088,8 +15088,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -15228,8 +15228,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -15291,8 +15291,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -15433,8 +15433,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -15496,8 +15496,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -15638,8 +15638,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -15701,8 +15701,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -15846,8 +15846,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -15909,8 +15909,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -16049,8 +16049,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -16117,8 +16117,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -16257,8 +16257,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -16325,8 +16325,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -16462,8 +16462,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -16522,8 +16522,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -16661,8 +16661,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -16724,8 +16724,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -16877,8 +16877,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
-
<TRUNCATED>
[63/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 1d57aee..d3449a7 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -105,7 +105,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService
virtual void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) = 0;
virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
virtual void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts) = 0;
- virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) = 0;
+ virtual void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) = 0;
virtual void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) = 0;
virtual void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part) = 0;
virtual bool partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception) = 0;
@@ -516,7 +516,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
void alter_partitions(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */) {
return;
}
- void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */, const EnvironmentContext& /* environment_context */) {
+ void alter_partitions_with_environment_context(AlterPartitionsResponse& /* _return */, const AlterPartitionsRequest& /* req */) {
return;
}
void alter_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) {
@@ -11637,11 +11637,8 @@ class ThriftHiveMetastore_alter_partitions_presult {
};
typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset {
- _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : db_name(false), tbl_name(false), new_parts(false), environment_context(false) {}
- bool db_name :1;
- bool tbl_name :1;
- bool new_parts :1;
- bool environment_context :1;
+ _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset() : req(false) {}
+ bool req :1;
} _ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset;
class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
@@ -11649,34 +11646,19 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
ThriftHiveMetastore_alter_partitions_with_environment_context_args(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
ThriftHiveMetastore_alter_partitions_with_environment_context_args& operator=(const ThriftHiveMetastore_alter_partitions_with_environment_context_args&);
- ThriftHiveMetastore_alter_partitions_with_environment_context_args() : db_name(), tbl_name() {
+ ThriftHiveMetastore_alter_partitions_with_environment_context_args() {
}
virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_args() throw();
- std::string db_name;
- std::string tbl_name;
- std::vector<Partition> new_parts;
- EnvironmentContext environment_context;
+ AlterPartitionsRequest req;
_ThriftHiveMetastore_alter_partitions_with_environment_context_args__isset __isset;
- void __set_db_name(const std::string& val);
-
- void __set_tbl_name(const std::string& val);
-
- void __set_new_parts(const std::vector<Partition> & val);
-
- void __set_environment_context(const EnvironmentContext& val);
+ void __set_req(const AlterPartitionsRequest& val);
bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_args & rhs) const
{
- if (!(db_name == rhs.db_name))
- return false;
- if (!(tbl_name == rhs.tbl_name))
- return false;
- if (!(new_parts == rhs.new_parts))
- return false;
- if (!(environment_context == rhs.environment_context))
+ if (!(req == rhs.req))
return false;
return true;
}
@@ -11697,17 +11679,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_pargs {
virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_pargs() throw();
- const std::string* db_name;
- const std::string* tbl_name;
- const std::vector<Partition> * new_parts;
- const EnvironmentContext* environment_context;
+ const AlterPartitionsRequest* req;
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
};
typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset {
- _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset() : o1(false), o2(false) {}
+ _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset() : success(false), o1(false), o2(false) {}
+ bool success :1;
bool o1 :1;
bool o2 :1;
} _ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset;
@@ -11721,17 +11701,22 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
}
virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_result() throw();
+ AlterPartitionsResponse success;
InvalidOperationException o1;
MetaException o2;
_ThriftHiveMetastore_alter_partitions_with_environment_context_result__isset __isset;
+ void __set_success(const AlterPartitionsResponse& val);
+
void __set_o1(const InvalidOperationException& val);
void __set_o2(const MetaException& val);
bool operator == (const ThriftHiveMetastore_alter_partitions_with_environment_context_result & rhs) const
{
+ if (!(success == rhs.success))
+ return false;
if (!(o1 == rhs.o1))
return false;
if (!(o2 == rhs.o2))
@@ -11750,7 +11735,8 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
};
typedef struct _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset {
- _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset() : o1(false), o2(false) {}
+ _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset() : success(false), o1(false), o2(false) {}
+ bool success :1;
bool o1 :1;
bool o2 :1;
} _ThriftHiveMetastore_alter_partitions_with_environment_context_presult__isset;
@@ -11760,6 +11746,7 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_presult {
virtual ~ThriftHiveMetastore_alter_partitions_with_environment_context_presult() throw();
+ AlterPartitionsResponse* success;
InvalidOperationException o1;
MetaException o2;
@@ -26472,9 +26459,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
void send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
void recv_alter_partitions();
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- void send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- void recv_alter_partitions_with_environment_context();
+ void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req);
+ void send_alter_partitions_with_environment_context(const AlterPartitionsRequest& req);
+ void recv_alter_partitions_with_environment_context(AlterPartitionsResponse& _return);
void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
void send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
void recv_alter_partition_with_environment_context();
@@ -28100,13 +28087,14 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
ifaces_[i]->alter_partitions(db_name, tbl_name, new_parts);
}
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) {
+ void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) {
size_t sz = ifaces_.size();
size_t i = 0;
for (; i < (sz - 1); ++i) {
- ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+ ifaces_[i]->alter_partitions_with_environment_context(_return, req);
}
- ifaces_[i]->alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+ ifaces_[i]->alter_partitions_with_environment_context(_return, req);
+ return;
}
void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) {
@@ -29559,9 +29547,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
int32_t send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts);
void recv_alter_partitions(const int32_t seqid);
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- int32_t send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context);
- void recv_alter_partitions_with_environment_context(const int32_t seqid);
+ void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req);
+ int32_t send_alter_partitions_with_environment_context(const AlterPartitionsRequest& req);
+ void recv_alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const int32_t seqid);
void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
int32_t send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context);
void recv_alter_partition_with_environment_context(const int32_t seqid);
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index d45ec81..c6b8204 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -437,7 +437,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
printf("alter_partitions\n");
}
- void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) {
+ void alter_partitions_with_environment_context(AlterPartitionsResponse& _return, const AlterPartitionsRequest& req) {
// Your implementation goes here
printf("alter_partitions_with_environment_context\n");
}
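
For context, the generated-code change above replaces four positional arguments (db_name, tbl_name, new_parts, environment_context) with a single request object and adds a response object. The following is only a rough Java-side sketch of that call shape; the org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest setter names are assumptions inferred from the old argument list, not verified against this patch.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;

public class AlterPartitionsRequestSketch {
  public static void main(String[] args) {
    List<Partition> newParts = new ArrayList<>();        // previously the new_parts argument

    AlterPartitionsRequest req = new AlterPartitionsRequest();
    req.setDbName("default");                            // previously db_name (setter name assumed)
    req.setTableName("t1");                              // previously tbl_name (setter name assumed)
    req.setPartitions(newParts);                         // previously new_parts (setter name assumed)
    req.setEnvironmentContext(new EnvironmentContext()); // previously environment_context (setter name assumed)

    // A call such as alter_partitions_with_environment_context(req) now yields an
    // AlterPartitionsResponse instead of void, matching the _result struct above.
  }
}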
[33/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
index c9078be..9302791 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedTreeReaderFactory.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hive.ql.io.orc.encoded;
+import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
+import org.apache.orc.OrcFile;
import org.apache.orc.impl.RunLengthByteReader;
import java.io.IOException;
@@ -1200,6 +1202,147 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
}
+ protected static class Decimal64StreamReader extends Decimal64TreeReader implements SettableTreeReader {
+ private boolean _isFileCompressed;
+ private SettableUncompressedStream _presentStream;
+ private SettableUncompressedStream _valueStream;
+ private List<ColumnVector> vectors;
+ private int vectorIndex = 0;
+
+ private Decimal64StreamReader(int columnId, int precision, int scale,
+ SettableUncompressedStream presentStream,
+ SettableUncompressedStream valueStream,
+ boolean isFileCompressed,
+ OrcProto.ColumnEncoding encoding, TreeReaderFactory.Context context,
+ List<ColumnVector> vectors) throws IOException {
+ super(columnId, presentStream, valueStream, encoding,
+ precision, scale, context);
+ this._isFileCompressed = isFileCompressed;
+ this._presentStream = presentStream;
+ this._valueStream = valueStream;
+ this.vectors = vectors;
+ }
+
+ @Override
+ public void seek(PositionProvider index) throws IOException {
+ if (vectors != null) return;
+ if (present != null) {
+ if (_isFileCompressed) {
+ index.getNext();
+ }
+ present.seek(index);
+ }
+
+ // data stream could be empty stream or already reached end of stream before present stream.
+ // This can happen if all values in stream are nulls or last row group values are all null.
+ skipCompressedIndex(_isFileCompressed, index);
+ if (_valueStream.available() > 0) {
+ valueReader.seek(index);
+ } else {
+ skipSeek(index);
+ }
+ }
+
+ @Override
+ public void nextVector(
+ ColumnVector previousVector, boolean[] isNull, int batchSize) throws IOException {
+ if (vectors == null) {
+ super.nextVector(previousVector, isNull, batchSize);
+ return;
+ }
+ vectors.get(vectorIndex++).shallowCopyTo(previousVector);
+ if (vectorIndex == vectors.size()) {
+ vectors = null;
+ }
+ }
+
+ @Override
+ public void setBuffers(EncodedColumnBatch<OrcBatchKey> batch, boolean sameStripe) {
+ assert vectors == null; // See the comment in TimestampStreamReader.setBuffers.
+ ColumnStreamData[] streamsData = batch.getColumnData(columnId);
+ if (_presentStream != null) {
+ _presentStream.setBuffers(StreamUtils.createDiskRangeInfo(streamsData[OrcProto.Stream.Kind.PRESENT_VALUE]));
+ }
+ if (_valueStream != null) {
+ _valueStream.setBuffers(StreamUtils.createDiskRangeInfo(streamsData[OrcProto.Stream.Kind.DATA_VALUE]));
+ }
+ }
+
+ public static class StreamReaderBuilder {
+ private int columnIndex;
+ private ColumnStreamData presentStream;
+ private ColumnStreamData valueStream;
+ private int scale;
+ private int precision;
+ private CompressionCodec compressionCodec;
+ private OrcProto.ColumnEncoding columnEncoding;
+ private List<ColumnVector> vectors;
+ private TreeReaderFactory.Context context;
+
+ public StreamReaderBuilder setColumnIndex(int columnIndex) {
+ this.columnIndex = columnIndex;
+ return this;
+ }
+
+ public StreamReaderBuilder setPrecision(int precision) {
+ this.precision = precision;
+ return this;
+ }
+
+ public StreamReaderBuilder setScale(int scale) {
+ this.scale = scale;
+ return this;
+ }
+
+ public StreamReaderBuilder setContext(TreeReaderFactory.Context context) {
+ this.context = context;
+ return this;
+ }
+
+ public StreamReaderBuilder setPresentStream(ColumnStreamData presentStream) {
+ this.presentStream = presentStream;
+ return this;
+ }
+
+ public StreamReaderBuilder setValueStream(ColumnStreamData valueStream) {
+ this.valueStream = valueStream;
+ return this;
+ }
+
+
+ public StreamReaderBuilder setCompressionCodec(CompressionCodec compressionCodec) {
+ this.compressionCodec = compressionCodec;
+ return this;
+ }
+
+ public StreamReaderBuilder setColumnEncoding(OrcProto.ColumnEncoding encoding) {
+ this.columnEncoding = encoding;
+ return this;
+ }
+
+ public Decimal64StreamReader build() throws IOException {
+ SettableUncompressedStream presentInStream = StreamUtils.createSettableUncompressedStream(
+ OrcProto.Stream.Kind.PRESENT.name(), presentStream);
+
+ SettableUncompressedStream valueInStream = StreamUtils.createSettableUncompressedStream(
+ OrcProto.Stream.Kind.DATA.name(), valueStream);
+
+ boolean isFileCompressed = compressionCodec != null;
+ return new Decimal64StreamReader(columnIndex, precision, scale, presentInStream,
+ valueInStream, isFileCompressed, columnEncoding, context, vectors);
+ }
+
+ public StreamReaderBuilder setVectors(List<ColumnVector> vectors) {
+ this.vectors = vectors;
+ return this;
+ }
+ }
+
+ public static StreamReaderBuilder builder() {
+ return new StreamReaderBuilder();
+ }
+ }
+
protected static class DateStreamReader extends DateTreeReader implements SettableTreeReader {
private boolean isFileCompressed;
private SettableUncompressedStream _presentStream;
@@ -2101,8 +2244,8 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
public static StructTreeReader createRootTreeReader(TypeDescription[] batchSchemas,
- List<OrcProto.ColumnEncoding> encodings, OrcEncodedColumnBatch batch,
- CompressionCodec codec, TreeReaderFactory.Context context) throws IOException {
+ List<OrcProto.ColumnEncoding> encodings, OrcEncodedColumnBatch batch,
+ CompressionCodec codec, Context context, final boolean useDecimal64ColumnVectors) throws IOException {
// Note: we only look at the schema here to deal with complex types. Somebody has set up the
// reader with whatever ideas they had to the schema and we just trust the reader to
// produce the CVBs that was asked for. However, we only need to look at top level columns.
@@ -2117,7 +2260,7 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
if (!batch.hasData(batchColIx) && !batch.hasVectors(batchColIx)) {
throw new AssertionError("No data for column " + batchColIx + ": " + batchSchemas[i]);
}
- childReaders[i] = createEncodedTreeReader(batchSchemas[i], encodings, batch, codec, context);
+ childReaders[i] = createEncodedTreeReader(batchSchemas[i], encodings, batch, codec, context, useDecimal64ColumnVectors);
}
// TODO: do we actually need this reader? the caller just extracts child readers.
@@ -2138,8 +2281,8 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
private static TreeReader createEncodedTreeReader(TypeDescription schema,
- List<OrcProto.ColumnEncoding> encodings, OrcEncodedColumnBatch batch,
- CompressionCodec codec, TreeReaderFactory.Context context) throws IOException {
+ List<OrcProto.ColumnEncoding> encodings, OrcEncodedColumnBatch batch,
+ CompressionCodec codec, Context context, final boolean useDecimal64ColumnVectors) throws IOException {
int columnIndex = schema.getId();
ColumnStreamData[] streamBuffers = null;
List<ColumnVector> vectors = null;
@@ -2200,12 +2343,12 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
case TIMESTAMP:
case DATE:
return getPrimitiveTreeReader(columnIndex, schema, codec, columnEncoding,
- present, data, dictionary, lengths, secondary, context, vectors);
+ present, data, dictionary, lengths, secondary, context, vectors, useDecimal64ColumnVectors);
case LIST:
assert vectors == null; // Not currently supported.
TypeDescription elementType = schema.getChildren().get(0);
TreeReader elementReader = createEncodedTreeReader(
- elementType, encodings, batch, codec, context);
+ elementType, encodings, batch, codec, context, useDecimal64ColumnVectors);
return ListStreamReader.builder()
.setColumnIndex(columnIndex)
.setColumnEncoding(columnEncoding)
@@ -2220,9 +2363,9 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
TypeDescription keyType = schema.getChildren().get(0);
TypeDescription valueType = schema.getChildren().get(1);
TreeReader keyReader = createEncodedTreeReader(
- keyType, encodings, batch, codec, context);
+ keyType, encodings, batch, codec, context, useDecimal64ColumnVectors);
TreeReader valueReader = createEncodedTreeReader(
- valueType, encodings, batch, codec, context);
+ valueType, encodings, batch, codec, context, useDecimal64ColumnVectors);
return MapStreamReader.builder()
.setColumnIndex(columnIndex)
.setColumnEncoding(columnEncoding)
@@ -2240,7 +2383,7 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
for (int i = 0; i < childCount; i++) {
TypeDescription childType = schema.getChildren().get(i);
childReaders[i] = createEncodedTreeReader(
- childType, encodings, batch, codec, context);
+ childType, encodings, batch, codec, context, useDecimal64ColumnVectors);
}
return StructStreamReader.builder()
.setColumnIndex(columnIndex)
@@ -2258,7 +2401,7 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
for (int i = 0; i < childCount; i++) {
TypeDescription childType = schema.getChildren().get(i);
childReaders[i] = createEncodedTreeReader(
- childType, encodings, batch, codec, context);
+ childType, encodings, batch, codec, context, useDecimal64ColumnVectors);
}
return UnionStreamReader.builder()
.setColumnIndex(columnIndex)
@@ -2276,10 +2419,10 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
}
private static TreeReader getPrimitiveTreeReader(final int columnIndex,
- TypeDescription columnType, CompressionCodec codec, OrcProto.ColumnEncoding columnEncoding,
- ColumnStreamData present, ColumnStreamData data, ColumnStreamData dictionary,
- ColumnStreamData lengths, ColumnStreamData secondary, TreeReaderFactory.Context context,
- List<ColumnVector> vectors) throws IOException {
+ TypeDescription columnType, CompressionCodec codec, OrcProto.ColumnEncoding columnEncoding,
+ ColumnStreamData present, ColumnStreamData data, ColumnStreamData dictionary,
+ ColumnStreamData lengths, ColumnStreamData secondary, Context context,
+ List<ColumnVector> vectors, final boolean useDecimal64ColumnVectors) throws IOException {
switch (columnType.getCategory()) {
case BINARY:
return BinaryStreamReader.builder()
@@ -2390,7 +2533,36 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
.setVectors(vectors)
.build();
case DECIMAL:
- return DecimalStreamReader.builder()
+ // special handling for serde reader (text) in llap IO.
+ // if file format version is null, then we are processing text IF in LLAP IO, in which case
+ // we get vectors instead of streams. If vectors contain instance of Decimal64ColumnVector we
+ // should use Decimal64StreamReader (which acts as a wrapper around vectors)
+ boolean useDecimal64Reader = context.getFileFormat() == null && vectors != null && useDecimal64ColumnVectors;
+ if (useDecimal64Reader) {
+ boolean containDecimal64CV = false;
+ for (ColumnVector vector : vectors) {
+ if (vector instanceof Decimal64ColumnVector) {
+ containDecimal64CV = true;
+ break;
+ }
+ }
+ useDecimal64Reader &= containDecimal64CV;
+ }
+ if ((context.getFileFormat() == OrcFile.Version.UNSTABLE_PRE_2_0 || useDecimal64Reader) &&
+ columnType.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
+ return Decimal64StreamReader.builder()
+ .setColumnIndex(columnIndex)
+ .setPrecision(columnType.getPrecision())
+ .setScale(columnType.getScale())
+ .setPresentStream(present)
+ .setValueStream(data)
+ .setCompressionCodec(codec)
+ .setColumnEncoding(columnEncoding)
+ .setVectors(vectors)
+ .setContext(context)
+ .build();
+ } else {
+ return DecimalStreamReader.builder()
.setColumnIndex(columnIndex)
.setPrecision(columnType.getPrecision())
.setScale(columnType.getScale())
@@ -2402,6 +2574,7 @@ public class EncodedTreeReaderFactory extends TreeReaderFactory {
.setVectors(vectors)
.setContext(context)
.build();
+ }
case TIMESTAMP:
return TimestampStreamReader.builder()
.setColumnIndex(columnIndex)
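
The DECIMAL branch above selects Decimal64StreamReader only when the file was written with the UNSTABLE-PRE-2.0 ORC format (or when LLAP's text path already hands back Decimal64ColumnVectors) and the declared precision fits in a 64-bit scaled long. A small sketch of that precision gate using the public ORC TypeDescription API; the helper name is illustrative and not part of this patch.

import org.apache.orc.TypeDescription;

public class Decimal64Gate {
  // Illustrative helper: a decimal column is eligible for the scaled-long (DECIMAL_64)
  // representation only when its precision does not exceed 18 digits.
  static boolean fitsDecimal64(TypeDescription type) {
    return type.getCategory() == TypeDescription.Category.DECIMAL
        && type.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION;
  }

  public static void main(String[] args) {
    TypeDescription small = TypeDescription.createDecimal().withPrecision(10).withScale(3);
    TypeDescription large = TypeDescription.createDecimal().withPrecision(38).withScale(10);
    System.out.println(fitsDecimal64(small)); // true  -> Decimal64StreamReader eligible
    System.out.println(fitsDecimal64(large)); // false -> falls back to DecimalStreamReader
  }
}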
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
index ed6d577..5e70a05 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetInputFormat.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hive.common.io.DataCache;
import org.apache.hadoop.hive.common.io.FileMetadataCache;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.InputFormatChecker;
import org.apache.hadoop.hive.ql.io.LlapCacheOnlyInputFormatInterface;
@@ -115,4 +116,9 @@ public class MapredParquetInputFormat extends FileInputFormat<NullWritable, Arra
return true;
}
+
+ @Override
+ public VectorizedSupport.Support[] getSupportedFeatures() {
+ return null;
+ }
}
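
The override above makes the Parquet input format report no extra vectorization features, while the ORC side of this patch is what advertises DECIMAL_64 (as reflected in the q.out diffs earlier). As a hedged sketch, an input format that does claim the feature would return something like the following, assuming the VectorizedSupport.Support enum exposes the DECIMAL_64 member shown in those plans.

import org.apache.hadoop.hive.ql.exec.vector.VectorizedSupport;

class Decimal64CapableFormat {
  // Same contract as VectorizedInputFormatInterface.getSupportedFeatures():
  // returning null, as Parquet does above, means no optional features are claimed.
  public VectorizedSupport.Support[] getSupportedFeatures() {
    return new VectorizedSupport.Support[] { VectorizedSupport.Support.DECIMAL_64 };
  }
}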
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 394f826..f4e8207 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.optimizer.physical;
import static org.apache.hadoop.hive.ql.plan.ReduceSinkDesc.ReducerTraits.UNIFORM;
+import java.io.IOException;
import java.io.Serializable;
import java.lang.annotation.Annotation;
import java.util.ArrayList;
@@ -40,7 +41,9 @@ import java.util.regex.Pattern;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
import org.apache.hadoop.hive.ql.exec.vector.reducesink.*;
+import org.apache.hadoop.hive.ql.exec.vector.udf.VectorUDFArgDesc;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator;
import org.slf4j.Logger;
@@ -129,7 +132,6 @@ import org.apache.hadoop.hive.ql.plan.VectorPTFDesc;
import org.apache.hadoop.hive.ql.plan.VectorPTFInfo;
import org.apache.hadoop.hive.ql.plan.VectorPTFDesc.SupportedFunctionType;
import org.apache.hadoop.hive.ql.plan.VectorTableScanDesc;
-import org.apache.hadoop.hive.ql.plan.VectorizationCondition;
import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc.ProcessingMode;
import org.apache.hadoop.hive.ql.plan.VectorSparkHashTableSinkDesc;
import org.apache.hadoop.hive.ql.plan.VectorSparkPartitionPruningSinkDesc;
@@ -1207,6 +1209,14 @@ public class Vectorizer implements PhysicalPlanResolver {
private Support[] getVectorizedInputFormatSupports(
Class<? extends InputFormat> inputFileFormatClass) {
+ try {
+ InputFormat inputFormat = FetchOperator.getInputFormatFromCache(inputFileFormatClass, hiveConf);
+ if (inputFormat instanceof VectorizedInputFormatInterface) {
+ return ((VectorizedInputFormatInterface) inputFormat).getSupportedFeatures();
+ }
+ } catch (IOException e) {
+ LOG.error("Unable to instantiate {} input format class. Cannot determine vectorization support.", e);
+ }
// FUTURE: Decide how to ask an input file format what vectorization features it supports.
return null;
}
@@ -1830,14 +1840,6 @@ public class Vectorizer implements PhysicalPlanResolver {
supportRemovedReasons.add(removeString);
}
- // And, if LLAP is enabled for now, disable DECIMAL_64;
- if (isLlapIoEnabled && supportSet.contains(Support.DECIMAL_64)) {
- supportSet.remove(Support.DECIMAL_64);
- String removeString =
- "DECIMAL_64 disabled because LLAP is enabled";
- supportRemovedReasons.add(removeString);
- }
-
// Now rememember what is supported for this query and any support that was
// removed.
vectorTaskColumnInfo.setSupportSetInUse(supportSet);
@@ -2246,6 +2248,7 @@ public class Vectorizer implements PhysicalPlanResolver {
@Override
public PhysicalContext resolve(PhysicalContext physicalContext) throws SemanticException {
+ physicalContext = physicalContext;
hiveConf = physicalContext.getConf();
planMapper = physicalContext.getContext().getPlanMapper();
@@ -4265,6 +4268,13 @@ public class Vectorizer implements PhysicalPlanResolver {
vecAggrClasses, aggregateName, inputColVectorType,
outputColVectorType, udafEvaluatorMode);
if (vecAggrClass != null) {
+ // for now, disable operating on decimal64 column vectors for semijoin reduction as
+ // we have to make sure same decimal type should be used during bloom filter creation
+ // and bloom filter probing
+ if (aggregateName.equals("bloom_filter")) {
+ inputExpression = vContext.wrapWithDecimal64ToDecimalConversion(inputExpression);
+ inputColVectorType = ColumnVector.Type.DECIMAL;
+ }
final VectorAggregationDesc vecAggrDesc =
new VectorAggregationDesc(
aggrDesc, evaluator, inputTypeInfo, inputColVectorType, inputExpression,
@@ -4359,8 +4369,6 @@ public class Vectorizer implements PhysicalPlanResolver {
return new ImmutablePair<Operator<? extends OperatorDesc>, String>(vectorOp, null);
}
- static int fake;
-
public static Operator<? extends OperatorDesc> vectorizeSelectOperator(
Operator<? extends OperatorDesc> selectOp, VectorizationContext vContext,
VectorSelectDesc vectorSelectDesc)
@@ -4386,6 +4394,13 @@ public class Vectorizer implements PhysicalPlanResolver {
if (index < size) {
vectorSelectExprs = Arrays.copyOf(vectorSelectExprs, index);
}
+
+ // Fix up the case where parent expression's output data type physical variations is DECIMAL whereas
+ // at least one of its children is DECIMAL_64. Some expressions like x % y for example only accepts DECIMAL
+ // for x and y (at this time there is only DecimalColModuloDecimalColumn so both x and y has to be DECIMAL).
+ // The following method introduces a cast if x or y is DECIMAL_64 and parent expression (x % y) is DECIMAL.
+ fixDecimalDataTypePhysicalVariations(vContext, vectorSelectExprs);
+
vectorSelectDesc.setSelectExpressions(vectorSelectExprs);
vectorSelectDesc.setProjectedOutputColumns(projectedOutputColumns);
@@ -4394,6 +4409,84 @@ public class Vectorizer implements PhysicalPlanResolver {
vContext, vectorSelectDesc);
}
+ private static void fixDecimalDataTypePhysicalVariations(final VectorizationContext vContext,
+ final VectorExpression[] vectorSelectExprs) throws HiveException {
+ for (int i = 0; i < vectorSelectExprs.length; i++) {
+ VectorExpression parent = vectorSelectExprs[i];
+ VectorExpression newParent = fixDecimalDataTypePhysicalVariations(parent, parent.getChildExpressions(),
+ vContext);
+ if (parent.getClass() == newParent.getClass() && parent != newParent) {
+ vectorSelectExprs[i] = newParent;
+ }
+ }
+ }
+
+ private static VectorExpression fixDecimalDataTypePhysicalVariations(final VectorExpression parent,
+ final VectorExpression[] children, final VectorizationContext vContext) throws HiveException {
+ if (children == null || children.length == 0) {
+ return parent;
+ }
+
+ for (int i = 0; i < children.length; i++) {
+ VectorExpression child = children[i];
+ VectorExpression newChild = fixDecimalDataTypePhysicalVariations(child, child.getChildExpressions(), vContext);
+ if (child.getClass() == newChild.getClass() && child != newChild) {
+ children[i] = newChild;
+ }
+ }
+ if (parent.getOutputDataTypePhysicalVariation() == DataTypePhysicalVariation.NONE) {
+ boolean inputArgsChanged = false;
+ DataTypePhysicalVariation[] dataTypePhysicalVariations = parent.getInputDataTypePhysicalVariations();
+ VectorExpression oldExpression = null;
+ VectorExpression newExpression = null;
+ for (int i = 0; i < children.length; i++) {
+ oldExpression = children[i];
+ // we found at least one children with mismatch
+ if (oldExpression.getOutputDataTypePhysicalVariation() == DataTypePhysicalVariation.DECIMAL_64) {
+ newExpression = vContext.wrapWithDecimal64ToDecimalConversion(oldExpression);
+ children[i] = newExpression;
+ inputArgsChanged = true;
+ dataTypePhysicalVariations[i] = DataTypePhysicalVariation.NONE;
+ }
+ }
+ // fix up the input column numbers and output column numbers
+ if (inputArgsChanged) {
+ if (parent instanceof VectorUDFAdaptor) {
+ VectorUDFAdaptor parentAdaptor = (VectorUDFAdaptor) parent;
+ VectorUDFArgDesc[] argDescs = parentAdaptor.getArgDescs();
+ for (VectorUDFArgDesc argDesc : argDescs) {
+ if (argDesc.getColumnNum() == oldExpression.getOutputColumnNum()) {
+ argDesc.setColumnNum(newExpression.getOutputColumnNum());
+ break;
+ }
+ }
+ } else {
+ int argumentCount = children.length + (parent.getOutputColumnNum() == -1 ? 0 : 1);
+ Object[] arguments = new Object[argumentCount];
+ // new input column numbers
+ for (int i = 0; i < children.length; i++) {
+ VectorExpression vce = children[i];
+ arguments[i] = vce.getOutputColumnNum();
+ }
+ // retain output column number from parent
+ if (parent.getOutputColumnNum() != -1) {
+ arguments[arguments.length - 1] = parent.getOutputColumnNum();
+ }
+ // re-instantiate the parent expression with new arguments
+ VectorExpression newParent = vContext.instantiateExpression(parent.getClass(), parent.getOutputTypeInfo(),
+ parent.getOutputDataTypePhysicalVariation(), arguments);
+ newParent.setOutputTypeInfo(parent.getOutputTypeInfo());
+ newParent.setOutputDataTypePhysicalVariation(parent.getOutputDataTypePhysicalVariation());
+ newParent.setInputTypeInfos(parent.getInputTypeInfos());
+ newParent.setInputDataTypePhysicalVariations(dataTypePhysicalVariations);
+ newParent.setChildExpressions(parent.getChildExpressions());
+ return newParent;
+ }
+ }
+ }
+ return parent;
+ }
+
private static void fillInPTFEvaluators(
List<WindowFunctionDef> windowsFunctions,
String[] evaluatorFunctionNames,
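
The fix-up pass above is needed because DECIMAL_64 and DECIMAL are two physical encodings of the same logical type: the former stores a scaled long, the latter a full decimal object, so a parent expression compiled for DECIMAL cannot consume a DECIMAL_64 child without an explicit conversion. A minimal, framework-free illustration of the two representations:

import java.math.BigDecimal;

public class DecimalRepresentations {
  public static void main(String[] args) {
    int scale = 3;
    long scaled = 1123L;                                  // DECIMAL_64 view of decimal(10,3): 1.123 as a scaled long
    BigDecimal full = BigDecimal.valueOf(scaled, scale);  // DECIMAL view: a full object carrying its own scale
    System.out.println(full);                             // prints 1.123
  }
}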
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index dc58ad1..fe475f6 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -229,7 +229,7 @@ public class TestInputOutputFormat {
return "booleanValue,byteValue,shortValue,intValue,longValue,floatValue,doubleValue,stringValue,decimalValue,dateValue,timestampValue";
}
static String getColumnTypesProperty() {
- return "boolean:tinyint:smallint:int:bigint:float:double:string:decimal:date:timestamp";
+ return "boolean:tinyint:smallint:int:bigint:float:double:string:decimal(38,18):date:timestamp";
}
}
@@ -3847,9 +3847,10 @@ public class TestInputOutputFormat {
* Test schema evolution when using the reader directly.
*/
@Test
- public void testSchemaEvolution() throws Exception {
+ public void testSchemaEvolutionOldDecimal() throws Exception {
TypeDescription fileSchema =
TypeDescription.fromString("struct<a:int,b:struct<c:int>,d:string>");
+ conf.set(ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED.varname, "decimal_64");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
@@ -3915,6 +3916,78 @@ public class TestInputOutputFormat {
}
/**
+ * Test schema evolution when using the reader directly.
+ */
+ @Test
+ public void testSchemaEvolutionDecimal64() throws Exception {
+ TypeDescription fileSchema =
+ TypeDescription.fromString("struct<a:int,b:struct<c:int>,d:string>");
+ conf.set(ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED.varname, "decimal_64");
+ Writer writer = OrcFile.createWriter(testFilePath,
+ OrcFile.writerOptions(conf)
+ .fileSystem(fs)
+ .setSchema(fileSchema)
+ .compress(org.apache.orc.CompressionKind.NONE));
+ VectorizedRowBatch batch = fileSchema.createRowBatch(TypeDescription.RowBatchVersion.USE_DECIMAL64,1000);
+ batch.size = 1000;
+ LongColumnVector lcv = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[0]);
+ for(int r=0; r < 1000; r++) {
+ ((LongColumnVector) batch.cols[0]).vector[r] = r * 42;
+ lcv.vector[r] = r * 10001;
+ ((BytesColumnVector) batch.cols[2]).setVal(r,
+ Integer.toHexString(r).getBytes(StandardCharsets.UTF_8));
+ }
+ writer.addRowBatch(batch);
+ writer.close();
+ TypeDescription readerSchema = TypeDescription.fromString(
+ "struct<a:int,b:struct<c:int,future1:int>,d:string,future2:int>");
+ Reader reader = OrcFile.createReader(testFilePath,
+ OrcFile.readerOptions(conf).filesystem(fs));
+ RecordReader rows = reader.rowsOptions(new Reader.Options()
+ .schema(readerSchema));
+ batch = readerSchema.createRowBatchV2();
+ lcv = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[0]);
+ LongColumnVector future1 = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[1]);
+ assertEquals(true, rows.nextBatch(batch));
+ assertEquals(1000, batch.size);
+ assertEquals(true, future1.isRepeating);
+ assertEquals(true, future1.isNull[0]);
+ assertEquals(true, batch.cols[3].isRepeating);
+ assertEquals(true, batch.cols[3].isNull[0]);
+ for(int r=0; r < batch.size; ++r) {
+ assertEquals("row " + r, r * 42, ((LongColumnVector) batch.cols[0]).vector[r]);
+ assertEquals("row " + r, r * 10001, lcv.vector[r]);
+ assertEquals("row " + r, r * 10001, lcv.vector[r]);
+ assertEquals("row " + r, Integer.toHexString(r),
+ ((BytesColumnVector) batch.cols[2]).toString(r));
+ }
+ assertEquals(false, rows.nextBatch(batch));
+ rows.close();
+
+ // try it again with an include vector
+ rows = reader.rowsOptions(new Reader.Options()
+ .schema(readerSchema)
+ .include(new boolean[]{false, true, true, true, false, false, true}));
+ batch = readerSchema.createRowBatchV2();
+ lcv = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[0]);
+ future1 = ((LongColumnVector) ((StructColumnVector) batch.cols[1]).fields[1]);
+ assertEquals(true, rows.nextBatch(batch));
+ assertEquals(1000, batch.size);
+ assertEquals(true, future1.isRepeating);
+ assertEquals(true, future1.isNull[0]);
+ assertEquals(true, batch.cols[3].isRepeating);
+ assertEquals(true, batch.cols[3].isNull[0]);
+ assertEquals(true, batch.cols[2].isRepeating);
+ assertEquals(true, batch.cols[2].isNull[0]);
+ for(int r=0; r < batch.size; ++r) {
+ assertEquals("row " + r, r * 42, ((LongColumnVector) batch.cols[0]).vector[r]);
+ assertEquals("row " + r, r * 10001, lcv.vector[r]);
+ }
+ assertEquals(false, rows.nextBatch(batch));
+ rows.close();
+ }
+
+ /**
* Test column projection when using ACID.
*/
@Test
@@ -3933,7 +4006,7 @@ public class TestInputOutputFormat {
.fileSystem(fs)
.setSchema(fileSchema)
.compress(org.apache.orc.CompressionKind.NONE));
- VectorizedRowBatch batch = fileSchema.createRowBatch(1000);
+ VectorizedRowBatch batch = fileSchema.createRowBatch(TypeDescription.RowBatchVersion.USE_DECIMAL64,1000);
batch.size = 1000;
StructColumnVector scv = (StructColumnVector)batch.cols[5];
// operation
@@ -4047,7 +4120,7 @@ public class TestInputOutputFormat {
.stripeSize(128);
// Create ORC file with small stripe size so we can write multiple stripes.
Writer writer = OrcFile.createWriter(testFilePath, options);
- VectorizedRowBatch batch = fileSchema.createRowBatch(1000);
+ VectorizedRowBatch batch = fileSchema.createRowBatch(TypeDescription.RowBatchVersion.USE_DECIMAL64,1000);
batch.size = 1000;
StructColumnVector scv = (StructColumnVector)batch.cols[5];
// operation
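
The tests above switch from createRowBatch(1000) to createRowBatchV2() / RowBatchVersion.USE_DECIMAL64 so that eligible decimal columns are materialized as Decimal64ColumnVector. A short sketch of the difference, assuming ORC 1.5+ and hive-storage-api on the classpath:

import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;

public class RowBatchVersionDemo {
  public static void main(String[] args) {
    TypeDescription schema =
        TypeDescription.fromString("struct<d1:decimal(10,3),d2:decimal(38,10)>");

    VectorizedRowBatch v1 = schema.createRowBatch();   // d1 and d2 both use DecimalColumnVector
    VectorizedRowBatch v2 = schema.createRowBatchV2(); // d1 uses Decimal64ColumnVector, d2 keeps DecimalColumnVector

    System.out.println(v1.cols[0].getClass().getSimpleName()); // DecimalColumnVector
    System.out.println(v2.cols[0].getClass().getSimpleName()); // Decimal64ColumnVector
    System.out.println(v2.cols[1].getClass().getSimpleName()); // DecimalColumnVector (precision 38 > 18)
  }
}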
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
index d8a7af8..cc29384 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
@@ -168,7 +168,7 @@ public class TestOrcRawRecordMerger {
setRow(row4, OrcRecordUpdater.INSERT_OPERATION, 40, 50, 60, 130, "fourth");
OrcStruct row5 = new OrcStruct(OrcRecordUpdater.FIELDS);
setRow(row5, OrcRecordUpdater.INSERT_OPERATION, 40, 50, 61, 140, "fifth");
- Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class)))
+ Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class), Mockito.any(HiveConf.class)))
.thenReturn(recordReader);
Mockito.when(recordReader.hasNext()).
@@ -192,7 +192,7 @@ public class TestOrcRawRecordMerger {
RecordIdentifier minKey = new RecordIdentifier(10, 20, 30);
RecordIdentifier maxKey = new RecordIdentifier(40, 50, 60);
ReaderPair pair = new OrcRawRecordMerger.ReaderPairAcid(key, reader, minKey, maxKey,
- new Reader.Options());
+ new Reader.Options(), new HiveConf());
RecordReader recordReader = pair.getRecordReader();
assertEquals(10, key.getWriteId());
assertEquals(20, key.getBucketProperty());
@@ -218,7 +218,7 @@ public class TestOrcRawRecordMerger {
Reader reader = createMockReader();
ReaderPair pair = new OrcRawRecordMerger.ReaderPairAcid(key, reader, null, null,
- new Reader.Options());
+ new Reader.Options(), new HiveConf());
RecordReader recordReader = pair.getRecordReader();
assertEquals(10, key.getWriteId());
assertEquals(20, key.getBucketProperty());
@@ -274,7 +274,7 @@ public class TestOrcRawRecordMerger {
OrcStruct row4 = createOriginalRow("fourth");
OrcStruct row5 = createOriginalRow("fifth");
- Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class)))
+ Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class), Mockito.any(HiveConf.class)))
.thenReturn(recordReader);
Mockito.when(recordReader.hasNext()).
thenReturn(true, true, true, true, true, false);
@@ -410,7 +410,7 @@ public class TestOrcRawRecordMerger {
types.add(typeBuilder.build());
Mockito.when(reader.getTypes()).thenReturn(types);
- Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class)))
+ Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class), Mockito.any(HiveConf.class)))
.thenReturn(recordReader);
OrcStruct row1 = new OrcStruct(OrcRecordUpdater.FIELDS);
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
index 0c9c95d..c23f00e 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
+import org.apache.orc.TypeDescription;
import org.junit.Before;
import org.junit.Test;
@@ -151,7 +152,7 @@ public class TestVectorizedORCReader {
OrcFile.readerOptions(conf));
RecordReaderImpl vrr = (RecordReaderImpl) vreader.rows();
RecordReaderImpl rr = (RecordReaderImpl) reader.rows();
- VectorizedRowBatch batch = reader.getSchema().createRowBatch();
+ VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
OrcStruct row = null;
// Check Vectorized ORC reader against ORC row reader
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
index e478371..551e5ca 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
@@ -226,7 +226,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
assertTrue(vectorizedReader.getDeleteEventRegistry() instanceof SortMergedDeleteEventRegistry);
}
TypeDescription schema = OrcInputFormat.getDesiredRowTypeDescr(conf, true, Integer.MAX_VALUE);
- VectorizedRowBatch vectorizedRowBatch = schema.createRowBatch();
+ VectorizedRowBatch vectorizedRowBatch = schema.createRowBatchV2();
vectorizedRowBatch.setPartitionInfo(1, 0); // set data column count as 1.
long previousPayload = Long.MIN_VALUE;
while (vectorizedReader.next(null, vectorizedRowBatch)) {
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/explainanalyze_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainanalyze_3.q b/ql/src/test/queries/clientpositive/explainanalyze_3.q
index 3d5b3a8..1f31218 100644
--- a/ql/src/test/queries/clientpositive/explainanalyze_3.q
+++ b/ql/src/test/queries/clientpositive/explainanalyze_3.q
@@ -110,7 +110,7 @@ select * from cte;
explain analyze with cte as (select * from src order by key limit 5)
select * from cte;
-create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n1;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/llap_acid2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_acid2.q b/ql/src/test/queries/clientpositive/llap_acid2.q
index a409c26..cd06d31 100644
--- a/ql/src/test/queries/clientpositive/llap_acid2.q
+++ b/ql/src/test/queries/clientpositive/llap_acid2.q
@@ -29,18 +29,27 @@ CREATE TABLE orc_llap_n2 (
cfloat1 FLOAT,
cdouble1 DOUBLE,
cstring1 string,
- cfloat2 float
-) stored as orc TBLPROPERTIES ('transactional'='true');
+ cfloat2 float,
+ cdecimal1 decimal(10,3),
+ cdecimal2 decimal(38,10)
+) stored as orc TBLPROPERTIES ('transactional'='true','orc.write.format'='UNSTABLE-PRE-2.0');
insert into table orc_llap_n2
select cint, cbigint, cfloat, cdouble,
cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc limit 30;
-
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30;
+alter table orc_llap_n2 set TBLPROPERTIES ('transactional'='true','orc.write.format'='0.12');
+insert into table orc_llap_n2
+select cint, cbigint, cfloat, cdouble,
+ cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
+ cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
+ cstring1, cfloat as c9, cast("3.321" as decimal(10,3))as c10,
+ cast("9.987654321" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30;
CREATE TABLE orc_llap2 (
@@ -57,18 +66,22 @@ CREATE TABLE orc_llap2 (
cfloat1 FLOAT,
cdouble1 DOUBLE,
cstring1 string,
- cfloat2 float
-) stored as orc TBLPROPERTIES ('transactional'='false');
+ cfloat2 float,
+ cdecimal1 decimal(10,3),
+ cdecimal2 decimal(38,10)
+) stored as orc TBLPROPERTIES ('transactional'='false', 'orc.write.format'='UNSTABLE-PRE-2.0');
insert into table orc_llap2
select cint, cbigint, cfloat, cdouble,
cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc limit 30;
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30;
-alter table orc_llap2 set TBLPROPERTIES ('transactional'='true');
+alter table orc_llap2 set TBLPROPERTIES ('transactional'='true','orc.write.format'='0.12');
-update orc_llap2 set cstring1 = 'testvalue' where cstring1 = 'N016jPED08o';
+update orc_llap2 set cstring1 = 'testvalue', cdecimal1 = cast("3.321" as decimal(10,3)),
+cdecimal2 = cast("9.987654321" as decimal(38,18)) where cstring1 = 'N016jPED08o';
SET hive.llap.io.enabled=true;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/llap_decimal64_reader.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_decimal64_reader.q b/ql/src/test/queries/clientpositive/llap_decimal64_reader.q
new file mode 100644
index 0000000..a81feba
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/llap_decimal64_reader.q
@@ -0,0 +1,54 @@
+--! qt:dataset:alltypesorc
+SET hive.vectorized.execution.enabled=true;
+
+SET hive.llap.io.enabled=false;
+
+SET hive.exec.orc.default.row.index.stride=1000;
+SET hive.optimize.index.filter=true;
+set hive.auto.convert.join=false;
+
+DROP TABLE orc_llap_n0;
+
+-- this test mixes and matches orc versions and flips config to use decimal64 column vectors
+set hive.auto.convert.join=true;
+SET hive.llap.io.enabled=true;
+CREATE TABLE orc_llap_n0(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN,
+ cdecimal1 decimal(10,2),
+ cdecimal2 decimal(38,5))
+ STORED AS ORC tblproperties ("orc.compress"="NONE");
+
+insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc;
+
+alter table orc_llap_n0 set tblproperties ("orc.compress"="NONE", 'orc.write.format'='UNSTABLE-PRE-2.0');
+
+insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("4.456" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc;
+
+set hive.vectorized.input.format.supports.enabled=decimal_64;
+explain vectorization select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2;
+select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2;
+
+set hive.vectorized.input.format.supports.enabled=none;
+explain vectorization select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2;
+select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2;
+
+DROP TABLE orc_llap_n0;
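[Editor sketch, not part of the patch] The new q file above flips the feature flag from SQL; a rough equivalent of the same toggle from Java, using only the property name and values that appear in the test (query execution itself is elided):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class Decimal64ToggleSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Accept DECIMAL_64 batches advertised by the ORC input format...
        conf.set("hive.vectorized.input.format.supports.enabled", "decimal_64");
        // ...or force the plain DecimalColumnVector path instead.
        conf.set("hive.vectorized.input.format.supports.enabled", "none");
      }
    }

With the first setting the EXPLAIN VECTORIZATION output should report featureSupportInUse: [DECIMAL_64]; with the second it should fall back to [].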
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/llap_uncompressed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_uncompressed.q b/ql/src/test/queries/clientpositive/llap_uncompressed.q
index 875356c..de3cdc6 100644
--- a/ql/src/test/queries/clientpositive/llap_uncompressed.q
+++ b/ql/src/test/queries/clientpositive/llap_uncompressed.q
@@ -24,13 +24,20 @@ CREATE TABLE orc_llap_n0(
ctimestamp1 TIMESTAMP,
ctimestamp2 TIMESTAMP,
cboolean1 BOOLEAN,
- cboolean2 BOOLEAN)
+ cboolean2 BOOLEAN,
+ cdecimal1 decimal(10,2),
+ cdecimal2 decimal(38,5))
STORED AS ORC tblproperties ("orc.compress"="NONE");
insert into table orc_llap_n0
-select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2
-from alltypesorc;
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc;
+alter table orc_llap_n0 set tblproperties ("orc.compress"="NONE", 'orc.write.format'='UNSTABLE-PRE-2.0');
+
+insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc;
SET hive.llap.io.enabled=true;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_create.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_create.q b/ql/src/test/queries/clientpositive/orc_create.q
index 6d41009..dfae138 100644
--- a/ql/src/test/queries/clientpositive/orc_create.q
+++ b/ql/src/test/queries/clientpositive/orc_create.q
@@ -78,7 +78,7 @@ CREATE TABLE orc_create_people_staging (
first_name string,
last_name string,
address string,
- salary decimal,
+ salary decimal(38,0),
start_date timestamp,
state string);
@@ -90,7 +90,7 @@ CREATE TABLE orc_create_people (
first_name string,
last_name string,
address string,
- salary decimal,
+ salary decimal(38,0),
start_date timestamp)
PARTITIONED BY (state string)
STORED AS orc;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_llap_counters.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_llap_counters.q b/ql/src/test/queries/clientpositive/orc_llap_counters.q
index 9f8e3bb..f3ee76c 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_counters.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_counters.q
@@ -40,7 +40,8 @@ CREATE TABLE orc_ppd_staging_n0(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n6 order by t, s;
+insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n6 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
-- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
-- which makes it hard to test bloom filters
@@ -62,7 +63,8 @@ CREATE TABLE orc_ppd_n1(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n0 order by t, s;
+insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n0 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
describe formatted orc_ppd_n1;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_llap_counters1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_llap_counters1.q b/ql/src/test/queries/clientpositive/orc_llap_counters1.q
index 16df96c..f12870c 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_counters1.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_counters1.q
@@ -39,7 +39,8 @@ CREATE TABLE orc_ppd_staging(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s;
+insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
-- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
-- which makes it hard to test bloom filters
@@ -61,7 +62,9 @@ CREATE TABLE orc_ppd(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s;
+insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
+
describe formatted orc_ppd;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_merge11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge11.q b/ql/src/test/queries/clientpositive/orc_merge11.q
index d5add84..208c5b7 100644
--- a/ql/src/test/queries/clientpositive/orc_merge11.q
+++ b/ql/src/test/queries/clientpositive/orc_merge11.q
@@ -3,15 +3,15 @@ set hive.vectorized.execution.enabled=false;
DROP TABLE orcfile_merge1_n2;
DROP TABLE orc_split_elim_n0;
-create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0;
-create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096");
+create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc tblproperties("orc.compress.size"="4096");
-insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0;
-insert into table orcfile_merge1_n2 select * from orc_split_elim_n0;
+insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid;
+insert into table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid;
dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n2/;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_merge5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge5.q b/ql/src/test/queries/clientpositive/orc_merge5.q
index 190c6e0..4ae5ba6 100644
--- a/ql/src/test/queries/clientpositive/orc_merge5.q
+++ b/ql/src/test/queries/clientpositive/orc_merge5.q
@@ -3,8 +3,8 @@ set hive.explain.user=false;
-- SORT_QUERY_RESULTS
-create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
+create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n5;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_merge6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge6.q b/ql/src/test/queries/clientpositive/orc_merge6.q
index fabe656..1c7ab08 100644
--- a/ql/src/test/queries/clientpositive/orc_merge6.q
+++ b/ql/src/test/queries/clientpositive/orc_merge6.q
@@ -4,8 +4,8 @@ set hive.explain.user=false;
-- SORT_QUERY_RESULTS
-- orc file merge tests for static partitions
-create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc;
+create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
+create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year string, hour int) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n4;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_merge7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge7.q b/ql/src/test/queries/clientpositive/orc_merge7.q
index 2558797..6504989 100644
--- a/ql/src/test/queries/clientpositive/orc_merge7.q
+++ b/ql/src/test/queries/clientpositive/orc_merge7.q
@@ -5,8 +5,8 @@ set hive.explain.user=false;
-- orc merge file tests for dynamic partition case
-create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc;
+create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
+create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n2;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
index aba4617..2b768ea 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
@@ -3,8 +3,8 @@ set hive.explain.user=false;
-- SORT_QUERY_RESULTS
-create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
+create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n3;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat2.q b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
index ef66522..6281c96 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
@@ -6,8 +6,8 @@ set hive.explain.user=false;
-- orc merge file tests for dynamic partition case
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc;
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
+create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_ppd_basic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_basic.q b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
index f0b0b96..b3f166a 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_basic.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
@@ -40,7 +40,8 @@ CREATE TABLE orc_ppd_staging_n1(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n7 order by t, s;
+insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n7 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
-- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
-- which makes it hard to test bloom filters
@@ -62,7 +63,9 @@ CREATE TABLE orc_ppd_n2(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n1 order by t, s;
+insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n1 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
+
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q
index 4235c2c..9d79b11 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_schema_evol_3a.q
@@ -37,7 +37,8 @@ CREATE TABLE orc_ppd_staging_n2(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_staging_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n8 order by t, s;
+insert overwrite table orc_ppd_staging_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n8 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
-- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
-- which makes it hard to test bloom filters
@@ -59,7 +60,8 @@ CREATE TABLE orc_ppd_n3(t tinyint,
bin binary)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-insert overwrite table orc_ppd_n3 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n2 order by t, s;
+insert overwrite table orc_ppd_n3 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n2 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
SET hive.optimize.index.filter=false;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_schema_evolution_float.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_schema_evolution_float.q b/ql/src/test/queries/clientpositive/orc_schema_evolution_float.q
index c2d9840..ca5dc6f 100644
--- a/ql/src/test/queries/clientpositive/orc_schema_evolution_float.q
+++ b/ql/src/test/queries/clientpositive/orc_schema_evolution_float.q
@@ -1,6 +1,8 @@
set hive.vectorized.execution.enabled=false;
set hive.optimize.index.filter=false;
set hive.metastore.disallow.incompatible.col.type.changes=false;
+-- set this to 'decimal_64' after resolving HIVE-19792
+set hive.vectorized.input.format.supports.enabled=none;
drop table float_text;
create table float_text(f float);
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/orc_split_elimination.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_split_elimination.q b/ql/src/test/queries/clientpositive/orc_split_elimination.q
index 719b21c..03e0e73 100644
--- a/ql/src/test/queries/clientpositive/orc_split_elimination.q
+++ b/ql/src/test/queries/clientpositive/orc_split_elimination.q
@@ -2,7 +2,7 @@ set hive.vectorized.execution.enabled=false;
-- SORT_QUERY_RESULTS
-create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc;
load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
@@ -105,7 +105,7 @@ select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70;
SET hive.optimize.index.filter=false;
-- partitioned table
-create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (country string, year int) stored as orc;
+create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (country string, year int) stored as orc;
alter table orc_split_elim_part add partition(country='us', year=2000);
alter table orc_split_elim_part add partition(country='us', year=2001);
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
index 427734f..53c16e0 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
@@ -12,6 +12,8 @@ set hive.exec.dynamic.partition.mode=nonstrict;
set hive.metastore.disallow.incompatible.col.type.changes=false;
set hive.default.fileformat=orc;
set hive.llap.io.enabled=false;
+-- set this to 'decimal_64' after resolving HIVE-19792
+set hive.vectorized.input.format.supports.enabled=none;
-- SORT_QUERY_RESULTS
--
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q
index 1eca9e3..f2fb2f0 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive_llap_io.q
@@ -13,6 +13,8 @@ set hive.metastore.disallow.incompatible.col.type.changes=false;
set hive.default.fileformat=orc;
set hive.llap.io.enabled=true;
set hive.llap.io.encode.enabled=true;
+-- set this to 'decimal_64' after resolving HIVE-19792
+set hive.vectorized.input.format.supports.enabled=none;
-- SORT_QUERY_RESULTS
--
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
index 6e35f5a..e811f1d 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive.q
@@ -12,6 +12,8 @@ set hive.exec.dynamic.partition.mode=nonstrict;
set hive.metastore.disallow.incompatible.col.type.changes=false;
set hive.default.fileformat=orc;
set hive.llap.io.enabled=false;
+-- set this to 'decimal_64' after resolving HIVE-19792
+set hive.vectorized.input.format.supports.enabled=none;
-- SORT_QUERY_RESULTS
--
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q
index 576f994..bae6cc8 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_primitive_llap_io.q
@@ -13,6 +13,8 @@ set hive.metastore.disallow.incompatible.col.type.changes=false;
set hive.default.fileformat=orc;
set hive.llap.io.enabled=true;
set hive.llap.io.encode.enabled=true;
+-- set this to 'decimal_64' after resolving HIVE-19792
+set hive.vectorized.input.format.supports.enabled=none;
-- SORT_QUERY_RESULTS
--
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/type_change_test_int.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/type_change_test_int.q b/ql/src/test/queries/clientpositive/type_change_test_int.q
index 112a674..2a49871 100644
--- a/ql/src/test/queries/clientpositive/type_change_test_int.q
+++ b/ql/src/test/queries/clientpositive/type_change_test_int.q
@@ -1,3 +1,6 @@
+-- set this to 'decimal_64' after resolving HIVE-19792
+set hive.vectorized.input.format.supports.enabled=none;
+
-- Create a base table to be used for loading data: Begin
drop table if exists testAltCol_n1;
create table testAltCol_n1
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/type_change_test_int_vectorized.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/type_change_test_int_vectorized.q b/ql/src/test/queries/clientpositive/type_change_test_int_vectorized.q
index 9e93a2f..6a940ac 100644
--- a/ql/src/test/queries/clientpositive/type_change_test_int_vectorized.q
+++ b/ql/src/test/queries/clientpositive/type_change_test_int_vectorized.q
@@ -1,3 +1,5 @@
+-- set this to 'decimal_64' after resolving HIVE-19792
+set hive.vectorized.input.format.supports.enabled=none;
-- Create a base table to be used for loading data: Begin
drop table if exists testAltCol;
create table testAltCol
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/vector_case_when_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_case_when_1.q b/ql/src/test/queries/clientpositive/vector_case_when_1.q
index 0ba17da..8614087 100644
--- a/ql/src/test/queries/clientpositive/vector_case_when_1.q
+++ b/ql/src/test/queries/clientpositive/vector_case_when_1.q
@@ -5,6 +5,8 @@ set hive.explain.user=false;
set hive.fetch.task.conversion=none;
set hive.vectorized.execution.enabled=true;
+-- SORT_QUERY_RESULTS
+
CREATE TABLE lineitem_test_txt (L_ORDERKEY INT,
L_PARTKEY INT,
L_SUPPKEY INT,
@@ -69,8 +71,7 @@ SELECT
IF(L_SUPPKEY > 10000, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE), NULL) AS Field_10,
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
-FROM lineitem_test
-ORDER BY Quantity;
+FROM lineitem_test;
SELECT
L_QUANTITY as Quantity,
CASE
@@ -109,8 +110,7 @@ SELECT
IF(L_SUPPKEY > 10000, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE), NULL) AS Field_10,
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
-FROM lineitem_test
-ORDER BY Quantity;
+FROM lineitem_test;
SET hive.vectorized.if.expr.mode=good;
@@ -153,8 +153,7 @@ SELECT
IF(L_SUPPKEY > 10000, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE), NULL) AS Field_10,
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
-FROM lineitem_test
-ORDER BY Quantity;
+FROM lineitem_test;
SELECT
L_QUANTITY as Quantity,
CASE
@@ -193,8 +192,7 @@ SELECT
IF(L_SUPPKEY > 10000, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE), NULL) AS Field_10,
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
-FROM lineitem_test
-ORDER BY Quantity;
+FROM lineitem_test;
SET hive.vectorized.if.expr.mode=better;
@@ -237,8 +235,7 @@ SELECT
IF(L_SUPPKEY > 10000, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE), NULL) AS Field_10,
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
-FROM lineitem_test
-ORDER BY Quantity;
+FROM lineitem_test;
SELECT
L_QUANTITY as Quantity,
CASE
@@ -277,6 +274,5 @@ SELECT
IF(L_SUPPKEY > 10000, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE), NULL) AS Field_10,
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
-FROM lineitem_test
-ORDER BY Quantity;
-
\ No newline at end of file
+FROM lineitem_test;
+
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/vector_decimal_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_5.q b/ql/src/test/queries/clientpositive/vector_decimal_5.q
index f5de13b..e0956e4 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_5.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_5.q
@@ -21,9 +21,10 @@ SELECT key FROM DECIMAL_5 ORDER BY key;
SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key;
+explain SELECT cast(key as decimal) FROM DECIMAL_5;
SELECT cast(key as decimal) FROM DECIMAL_5;
SELECT cast(key as decimal(6,3)) FROM DECIMAL_5;
DROP TABLE DECIMAL_5_txt;
-DROP TABLE DECIMAL_5;
\ No newline at end of file
+DROP TABLE DECIMAL_5;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
index 6e5b5b6..ef769fb 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
@@ -35,11 +35,13 @@ select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n2
-- SORT_QUERY_RESULTS
select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`);
+select count(*) from (select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t;
explain vectorization detail
select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`);
select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`);
+select count(*) from (select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t;
@@ -72,11 +74,13 @@ select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`
-- SORT_QUERY_RESULTS
select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`);
+select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t;
explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`);
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`);
+select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t;
set hive.vectorized.input.format.supports.enabled=none;
@@ -87,9 +91,11 @@ select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`
-- SORT_QUERY_RESULTS
select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`);
+select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t;
explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`);
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`);
+select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t;
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/queries/clientpositive/vectorized_dynamic_semijoin_reduction2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_semijoin_reduction2.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_semijoin_reduction2.q
index 7998035..743e8db 100644
--- a/ql/src/test/queries/clientpositive/vectorized_dynamic_semijoin_reduction2.q
+++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_semijoin_reduction2.q
@@ -39,7 +39,7 @@ EXPLAIN select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_bigint
select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_bigint = b.partkey_bigint);
-- single key (decimal)
-EXPLAIN select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_decimal = b.partkey_decimal);
+EXPLAIN VECTORIZATION DETAIL select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_decimal = b.partkey_decimal);
select count(*) from dsrv2_big a join dsrv2_small b on (a.partkey_decimal = b.partkey_decimal);
-- single key (double)
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
index eb4a8cb..80bbba4 100644
--- a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
@@ -1144,8 +1144,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1342,8 +1342,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1511,8 +1511,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1541,8 +1541,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1929,8 +1929,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2128,8 +2128,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2298,8 +2298,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2328,8 +2328,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
index 957dfd8..66bb2db 100644
--- a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
@@ -665,22 +665,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: over10k_orc_bucketed
- Statistics: Num rows: 1247 Data size: 713720 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1237 Data size: 707880 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: ROW__ID
- Statistics: Num rows: 1247 Data size: 713720 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1237 Data size: 707880 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: hash
outputColumnNames: _col0, _col1
- Statistics: Num rows: 623 Data size: 52332 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
- Statistics: Num rows: 623 Data size: 52332 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: llap
LLAP IO: may be used (ACID table)
@@ -692,13 +692,13 @@ STAGE PLANS:
keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: mergepartial
outputColumnNames: _col0, _col1
- Statistics: Num rows: 623 Data size: 52332 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (_col1 > 1L) (type: boolean)
- Statistics: Num rows: 207 Data size: 17388 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 207 Data size: 17388 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
index 84477c3..7a880dd 100644
--- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -3233,19 +3233,19 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid_uami_n1
- Statistics: Num rows: 267 Data size: 83640 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 281 Data size: 87904 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean)
- Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int), vc (type: varchar(128))
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: int), _col3 (type: varchar(128))
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
@@ -3255,10 +3255,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128))
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 1566 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -3326,7 +3326,7 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid_uami_n1
- Statistics: Num rows: 305 Data size: 95448 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 320 Data size: 100040 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean)
Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join6.q.out b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
index e2d6cc8..7151965 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join6.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
@@ -130,7 +130,7 @@ POSTHOOK: query: explain vectorization detail formatted
select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
(select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"TableScan":{"alias:":"tjoin2_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","Operato
rId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE C
olumn stats: NONE","table:":"tjoin1_n0","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOp
erator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SE
L_25","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col3":"1:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col3"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"Select Operator":{"expressions:":"_col0 (type: int), _col
1 (type: int), _col3 (type: int)","columnExprMap:":{"_col0":"_col0","_col1":"_col1","_col2":"_col3"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1, 2]"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_27","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_28"}}}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format
IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_29"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"TableScan":{"alias:":"tjoin2_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","Operato
rId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE C
olumn stats: NONE","table:":"tjoin1_n0","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOp
erator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SE
L_25","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col3":"1:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col3"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"Select Operator":{"expressions:":"_col0 (type: int), _col
1 (type: int), _col3 (type: int)","columnExprMap:":{"_col0":"_col0","_col1":"_col1","_col2":"_col3"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1, 2]"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_27","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_28"}}}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format
IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_29"}}}}}}
PREHOOK: query: select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
(select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
PREHOOK: type: QUERY
@@ -157,7 +157,7 @@ POSTHOOK: query: explain vectorization detail formatted
select tj1rnum, tj2rnum as rnumt3 from
(select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"TableScan":{"alias:":"tjoin2_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","Operato
rId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"c1 (type: int)","columnExprMap:":{"_col0":"c1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin1_n0","TableSc
an Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:"
:["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_25","children":{"Map Join Operator":{"columnEx
prMap:":{"_col0":"0:_col0","_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"
Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_27"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"List
Sink":{"OperatorId:":"LIST_SINK_28"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-7":{"ROOT STAGE":"TRUE"},"Stage-5":{"DEPENDENT STAGES":"Stage-7"},"Stage-0":{"DEPENDENT STAGES":"Stage-5"}},"STAGE PLANS":{"Stage-7":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_1:tjoin3":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_0:$hdt$_0:tjoin2_n0":{"TableScan":{"alias:":"tjoin2_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin2_n0","isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE","Operato
rId:":"SEL_1","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"OperatorId:":"HASHTABLESINK_21"}}}}}},"$hdt$_1:tjoin3":{"TableScan":{"alias:":"tjoin3","columns:":["c1"],"database:":"default","Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin3","isTempTable:":"false","OperatorId:":"TS_8","children":{"Select Operator":{"expressions:":"c1 (type: int)","columnExprMap:":{"_col0":"c1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_9","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_19"}}}}}}}}},"Stage-5":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"tjoin1_n0","columns:":["rnum","c1"],"database:":"default","Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","table:":"tjoin1_n0","TableSc
an Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:rnum:int, 1:c1:int, 2:c2:int, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"rnum (type: int), c1 (type: int)","columnExprMap:":{"_col0":"rnum","_col1":"c1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1]"},"Statistics:":"Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_23","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col2":"1:_col0"},"condition map:":[{"":"Right Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col1 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:"
:["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2"],"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_24","children":{"Select Operator":{"expressions:":"_col2 (type: int), _col0 (type: int), _col1 (type: int)","columnExprMap:":{"_col0":"_col2","_col1":"_col0","_col2":"_col1"},"outputColumnNames:":["_col0","_col1","_col2"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 0, 1]"},"Statistics:":"Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_25","children":{"Map Join Operator":{"columnEx
prMap:":{"_col0":"0:_col0","_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col0 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 1:int"],"bigTableValueExpressions:":["col 2:int","col 0:int"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_26","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"
Statistics:":"Num rows: 4 Data size: 449 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_27"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"3","includeColumns:":"[0, 1]","dataColumns:":["rnum:int","c1:int","c2:int"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Pro
cessor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_28"}}}}}}
PREHOOK: query: select tj1rnum, tj2rnum as rnumt3 from
(select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1
PREHOOK: type: QUERY
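A minimal sketch of how the updated Map Vectorization fields above can be reproduced (not part of the patch; it assumes the tjoin1_n0, tjoin2_n0 and tjoin3 ORC tables created by this q-file are present in the session):

-- condition listed under enabledConditionsMet in the plan header
SET hive.vectorized.execution.enabled=true;
-- prints the JSON plan shown in the hunks above, including the Map Vectorization block
explain vectorization detail formatted
select tj1rnum, tj2rnum as rnumt3 from
(select tjoin1_n0.rnum tj1rnum, tjoin2_n0.rnum tj2rnum, tjoin2_n0.c1 tj2c1 from tjoin1_n0 left outer join tjoin2_n0 on tjoin1_n0.c1 = tjoin2_n0.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1;

With the upgraded ORC reader, the inputFormatFeatureSupport and featureSupportInUse entries in the resulting Map Vectorization block report [DECIMAL_64] instead of [], as the updated golden files in this diff show.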
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join_no_keys.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join_no_keys.q.out b/ql/src/test/results/clientpositive/vector_outer_join_no_keys.q.out
index 7454c4b..750ef5c 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join_no_keys.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join_no_keys.q.out
@@ -116,8 +116,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -261,8 +261,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_reduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce1.q.out b/ql/src/test/results/clientpositive/vector_reduce1.q.out
index 8be8dba..99bdfe6 100644
--- a/ql/src/test/results/clientpositive/vector_reduce1.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce1.q.out
@@ -145,8 +145,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_reduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce2.q.out b/ql/src/test/results/clientpositive/vector_reduce2.q.out
index 7799746..4bc7bf5 100644
--- a/ql/src/test/results/clientpositive/vector_reduce2.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce2.q.out
@@ -145,8 +145,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_reduce3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce3.q.out b/ql/src/test/results/clientpositive/vector_reduce3.q.out
index a7ad970..22923ee 100644
--- a/ql/src/test/results/clientpositive/vector_reduce3.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce3.q.out
@@ -145,8 +145,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index 15f3b09..a4048bd 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -83,8 +83,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_string_concat.q.out b/ql/src/test/results/clientpositive/vector_string_concat.q.out
index a61c6f0..68b011d 100644
--- a/ql/src/test/results/clientpositive/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/vector_string_concat.q.out
@@ -151,8 +151,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -369,8 +369,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_struct_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_struct_in.q.out b/ql/src/test/results/clientpositive/vector_struct_in.q.out
index 66dd49a..265825b 100644
--- a/ql/src/test/results/clientpositive/vector_struct_in.q.out
+++ b/ql/src/test/results/clientpositive/vector_struct_in.q.out
@@ -90,8 +90,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -203,8 +203,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -340,8 +340,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -453,8 +453,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -590,8 +590,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -703,8 +703,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -843,8 +843,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -959,8 +959,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
index ae13ae6..ef20c86 100644
--- a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
+++ b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out
@@ -47,8 +47,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -142,8 +142,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_udf3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_udf3.q.out b/ql/src/test/results/clientpositive/vector_udf3.q.out
index e230c0e..27dde3e 100644
--- a/ql/src/test/results/clientpositive/vector_udf3.q.out
+++ b/ql/src/test/results/clientpositive/vector_udf3.q.out
@@ -48,8 +48,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/vector_varchar_4.q.out
index 24016b2..0ffb777 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_4.q.out
@@ -174,8 +174,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out b/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
index 282aec4..29bc9df 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
@@ -191,8 +191,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -300,8 +300,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -411,8 +411,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
index f3aec13..154e752 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out
@@ -84,8 +84,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -193,8 +193,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -316,8 +316,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_when_case_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_when_case_null.q.out b/ql/src/test/results/clientpositive/vector_when_case_null.q.out
index 3ce7b41..2cdbe38 100644
--- a/ql/src/test/results/clientpositive/vector_when_case_null.q.out
+++ b/ql/src/test/results/clientpositive/vector_when_case_null.q.out
@@ -77,8 +77,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_1.q.out b/ql/src/test/results/clientpositive/vectorization_1.q.out
index 767db3c..6801978 100644
--- a/ql/src/test/results/clientpositive/vectorization_1.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_1.q.out
@@ -99,8 +99,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_10.q.out b/ql/src/test/results/clientpositive/vectorization_10.q.out
index 57a6fe9..69aa608 100644
--- a/ql/src/test/results/clientpositive/vectorization_10.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_10.q.out
@@ -94,8 +94,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_11.q.out b/ql/src/test/results/clientpositive/vectorization_11.q.out
index 5792d0d..06cde65 100644
--- a/ql/src/test/results/clientpositive/vectorization_11.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_11.q.out
@@ -76,8 +76,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_12.q.out b/ql/src/test/results/clientpositive/vectorization_12.q.out
index 39c3476..527d741 100644
--- a/ql/src/test/results/clientpositive/vectorization_12.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_12.q.out
@@ -126,8 +126,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
index 1cf64a3..cae441e 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -128,8 +128,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -458,8 +458,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_14.q.out b/ql/src/test/results/clientpositive/vectorization_14.q.out
index 9dbc20c..86be72e 100644
--- a/ql/src/test/results/clientpositive/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_14.q.out
@@ -128,8 +128,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out b/ql/src/test/results/clientpositive/vectorization_15.q.out
index 589b8f5..db6dea1 100644
--- a/ql/src/test/results/clientpositive/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_15.q.out
@@ -124,8 +124,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_16.q.out b/ql/src/test/results/clientpositive/vectorization_16.q.out
index 18120f2..0d58e55 100644
--- a/ql/src/test/results/clientpositive/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_16.q.out
@@ -101,8 +101,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_17.q.out b/ql/src/test/results/clientpositive/vectorization_17.q.out
index 6c9212f..b782e3a 100644
--- a/ql/src/test/results/clientpositive/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_17.q.out
@@ -94,8 +94,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_2.q.out b/ql/src/test/results/clientpositive/vectorization_2.q.out
index 05f22bb..be7c843 100644
--- a/ql/src/test/results/clientpositive/vectorization_2.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_2.q.out
@@ -103,8 +103,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_3.q.out b/ql/src/test/results/clientpositive/vectorization_3.q.out
index b5ee0fb..7bfb313 100644
--- a/ql/src/test/results/clientpositive/vectorization_3.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_3.q.out
@@ -108,8 +108,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_4.q.out b/ql/src/test/results/clientpositive/vectorization_4.q.out
index 6dd398d..0700753 100644
--- a/ql/src/test/results/clientpositive/vectorization_4.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_4.q.out
@@ -103,8 +103,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_5.q.out b/ql/src/test/results/clientpositive/vectorization_5.q.out
index dc539e0..6c82694 100644
--- a/ql/src/test/results/clientpositive/vectorization_5.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_5.q.out
@@ -96,8 +96,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_6.q.out b/ql/src/test/results/clientpositive/vectorization_6.q.out
index 6d17130..1c6e0c5 100644
--- a/ql/src/test/results/clientpositive/vectorization_6.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_6.q.out
@@ -88,8 +88,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_7.q.out b/ql/src/test/results/clientpositive/vectorization_7.q.out
index 3999bf2..a1eb6f1 100644
--- a/ql/src/test/results/clientpositive/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_7.q.out
@@ -100,8 +100,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -320,8 +320,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_8.q.out b/ql/src/test/results/clientpositive/vectorization_8.q.out
index f658f80..3172f65 100644
--- a/ql/src/test/results/clientpositive/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_8.q.out
@@ -96,8 +96,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -303,8 +303,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_9.q.out b/ql/src/test/results/clientpositive/vectorization_9.q.out
index 18120f2..0d58e55 100644
--- a/ql/src/test/results/clientpositive/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_9.q.out
@@ -101,8 +101,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
index 44e4632..b1f4bdc 100644
--- a/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
@@ -68,8 +68,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_limit.q.out b/ql/src/test/results/clientpositive/vectorization_limit.q.out
index 438c060..cd256c6 100644
--- a/ql/src/test/results/clientpositive/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_limit.q.out
@@ -36,8 +36,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -137,8 +137,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -266,8 +266,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -446,8 +446,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -750,8 +750,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_nested_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_nested_udf.q.out b/ql/src/test/results/clientpositive/vectorization_nested_udf.q.out
index c0677bd..b4fe31a 100644
--- a/ql/src/test/results/clientpositive/vectorization_nested_udf.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_nested_udf.q.out
@@ -56,8 +56,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out b/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
index 18d236c..5920b3d 100644
--- a/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_offset_limit.q.out
@@ -40,8 +40,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -117,8 +117,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/vectorization_part_project.q.out
index e34bb24..50052fd 100644
--- a/ql/src/test/results/clientpositive/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_part_project.q.out
@@ -78,8 +78,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
index 6216581..e962362 100644
--- a/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
@@ -39,8 +39,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out
index 9c7339e..7847363 100644
--- a/ql/src/test/results/clientpositive/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_case.q.out
@@ -82,8 +82,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -227,8 +227,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -310,8 +310,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -425,8 +425,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -517,7 +517,7 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE ((attr + 2)) END (type: decimal(11,0))
outputColumnNames: _col0
@@ -525,7 +525,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [6]
- selectExpressions: IfExprCondExprCondExpr(col 3:boolean, col 4:decimal(11,0)col 5:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, DecimalColAddDecimalScalar(col 1:decimal(10,0), val 1) -> 4:decimal(11,0), DecimalColAddDecimalScalar(col 1:decimal(10,0), val 2) -> 5:decimal(11,0)) -> 6:decimal(11,0)
+ selectExpressions: IfExprCondExprCondExpr(col 3:boolean, col 7:decimal(11,0)col 8:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, ConvertDecimal64ToDecimal(col 4:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 1, decimalVal 1) -> 4:decimal(11,0)/DECIMAL_64) -> 7:decimal(11,0), ConvertDecimal64ToDecimal(col 5:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 2, decimalVal 2) -> 5:decimal(11,0)/DECIMAL_64) -> 8:decimal(11,0)) -> 6:decimal(11,0)
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -541,8 +541,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -550,9 +550,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(11,0), decimal(11,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(11,0)/DECIMAL_64, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -594,15 +594,15 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN (1) ELSE ((attr + 2)) END (type: decimal(11,0))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [6]
- selectExpressions: IfExprColumnCondExpr(col 3:boolean, col 4:decimal(1,0)col 5:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, ConstantVectorExpression(val 1) -> 4:decimal(1,0), DecimalColAddDecimalScalar(col 1:decimal(10,0), val 2) -> 5:decimal(11,0)) -> 6:decimal(11,0)
+ projectedOutputColumnNums: [8]
+ selectExpressions: VectorUDFAdaptor(CASE WHEN ((member = 1)) THEN (1) ELSE ((attr + 2)) END)(children: VectorUDFAdaptor((member = 1)) -> 6:boolean, ConvertDecimal64ToDecimal(col 7:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 2, decimalVal 2) -> 7:decimal(11,0)/DECIMAL_64) -> 9:decimal(11,0)) -> 8:decimal(11,0)
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -618,8 +618,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -627,9 +627,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(1,0), decimal(11,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(1,0), decimal(11,0)/DECIMAL_64, bigint, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -671,15 +671,15 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (2) END (type: decimal(11,0))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [6]
- selectExpressions: IfExprCondExprColumn(col 3:boolean, col 4:decimal(11,0), col 5:decimal(1,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, DecimalColAddDecimalScalar(col 1:decimal(10,0), val 1) -> 4:decimal(11,0), ConstantVectorExpression(val 2) -> 5:decimal(1,0)) -> 6:decimal(11,0)
+ projectedOutputColumnNums: [8]
+ selectExpressions: VectorUDFAdaptor(CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (2) END)(children: VectorUDFAdaptor((member = 1)) -> 6:boolean, ConvertDecimal64ToDecimal(col 7:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 1, decimalVal 1) -> 7:decimal(11,0)/DECIMAL_64) -> 9:decimal(11,0)) -> 8:decimal(11,0)
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -695,8 +695,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -704,9 +704,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(11,0), decimal(1,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(11,0)/DECIMAL_64, decimal(1,0), bigint, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -790,8 +790,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -867,8 +867,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -944,8 +944,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
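The vectorized_case.q.out hunks above show most directly what the DECIMAL_64 reader change means for the plans: small-precision columns such as member:decimal(10,0) are now tagged decimal(10,0)/DECIMAL_64 in the row batch, additions are planned as Decimal64ColAddDecimal64Scalar working on 64-bit values, and an explicit ConvertDecimal64ToDecimal step appears only where a full HiveDecimal result is still required downstream. As a rough, hypothetical illustration (plain Java, not Hive's actual vectorized expression classes), a decimal whose precision fits in 18 digits can be carried as an unscaled long and added without allocating any decimal objects:

    // Illustrative sketch only. Assumes both operands share the same scale,
    // as a decimal(10,0) column plus an integer literal does in the plans above.
    public final class Decimal64Sketch {
        // Adds a scalar to every element of a "column" of decimal(10,0) values
        // held as unscaled longs; mirrors the shape of a col-add-scalar expression.
        static void addScalar(long[] unscaledColumn, long unscaledScalar) {
            for (int i = 0; i < unscaledColumn.length; i++) {
                unscaledColumn[i] += unscaledScalar; // pure long arithmetic, no decimal objects
            }
        }

        public static void main(String[] args) {
            long[] attr = {101, 102, 103};   // decimal(10,0) values 101, 102, 103
            addScalar(attr, 2);              // corresponds to the plan's "attr + 2"
            System.out.println(java.util.Arrays.toString(attr)); // prints [103, 104, 105]
        }
    }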
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_casts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_casts.q.out b/ql/src/test/results/clientpositive/vectorized_casts.q.out
index c79d8d7..608e85a 100644
--- a/ql/src/test/results/clientpositive/vectorized_casts.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_casts.q.out
@@ -196,8 +196,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vectorized_context.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_context.q.out b/ql/src/test/results/clientpositive/vectorized_context.q.out
index 539223b..e322163 100644
--- a/ql/src/test/results/clientpositive/vectorized_context.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_context.q.out
@@ -194,8 +194,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
index 5466297..42e9694 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_navfn.q.out
@@ -100,8 +100,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -226,7 +225,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 304 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: d (type: double), dec (type: decimal(4,2))
sort order: ++
@@ -246,8 +245,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -255,7 +253,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [5, 7, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -485,7 +483,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: bin (type: binary), d (type: double), i (type: int)
sort order: ++-
@@ -505,8 +503,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -514,7 +511,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 5, 7, 10]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -710,7 +707,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: i (type: int), s (type: string), dec (type: decimal(4,2))
sort order: +++
@@ -729,8 +726,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -738,7 +734,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 7, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -934,7 +930,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: d (type: double), f (type: float)
sort order: ++
@@ -954,8 +950,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -963,7 +958,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 4, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1193,7 +1188,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: bo (type: boolean), s (type: string)
sort order: ++
@@ -1212,8 +1207,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1221,7 +1215,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [6, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1418,7 +1412,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -1446,8 +1440,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1455,7 +1448,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: [bigint, bigint]
Reducer 2
@@ -1639,8 +1632,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1812,8 +1804,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1985,8 +1976,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2158,8 +2148,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
index f950c4c..91b52e7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: i (type: int), s (type: string), b (type: bigint)
sort order: +++
@@ -101,8 +101,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -110,7 +109,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 3, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -215,7 +214,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: d (type: double), s (type: string), f (type: float)
sort order: ++-
@@ -234,8 +233,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -243,7 +241,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -348,7 +346,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), f (type: float)
sort order: ++
@@ -368,8 +366,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -377,7 +374,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 7, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -482,7 +479,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: t (type: tinyint), s (type: string), d (type: double)
sort order: ++-
@@ -501,8 +498,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -510,7 +506,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -615,7 +611,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), s (type: string)
sort order: ++
@@ -635,8 +631,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -644,7 +639,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 7, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -785,7 +780,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), i (type: int)
sort order: +-
@@ -805,8 +800,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -814,7 +808,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -950,7 +944,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), i (type: int)
sort order: +-
@@ -970,8 +964,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -979,7 +972,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1115,7 +1108,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), i (type: int)
sort order: ++
@@ -1135,8 +1128,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1144,7 +1136,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_range_multiorder.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_range_multiorder.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_range_multiorder.q.out
index 3a1b9c5e..782bd9b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_range_multiorder.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_range_multiorder.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: si (type: smallint), i (type: int), b (type: bigint)
sort order: +++
@@ -94,8 +94,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -103,7 +102,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 1, 2, 3]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -333,7 +332,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: si (type: smallint), bo (type: boolean), i (type: int), f (type: float)
sort order: +++-
@@ -352,8 +351,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -361,7 +359,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 4, 6]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -556,7 +554,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: si (type: smallint), bo (type: boolean), i (type: int), f (type: float)
sort order: +++-
@@ -575,8 +573,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -584,7 +581,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 4, 6]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -779,7 +776,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string)
sort order: +
@@ -798,8 +795,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -807,7 +803,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -10929,7 +10925,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), si (type: smallint), i (type: int)
sort order: +++
@@ -10948,8 +10944,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10957,7 +10952,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -11187,7 +11182,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), si (type: smallint), i (type: int)
sort order: +++
@@ -11206,8 +11201,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11215,7 +11209,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -11445,7 +11439,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), si (type: smallint), i (type: int)
sort order: ++-
@@ -11464,8 +11458,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11473,7 +11466,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -11703,7 +11696,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: si (type: smallint), bo (type: boolean), i (type: int), f (type: float)
sort order: +++-
@@ -11722,8 +11715,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11731,7 +11723,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 4, 6]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -11961,7 +11953,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: i (type: int), bo (type: boolean), b (type: bigint)
sort order: +++
@@ -11980,8 +11972,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11989,7 +11980,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 3, 6]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -12185,7 +12176,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: i (type: int), CAST( s AS CHAR(12)) (type: char(12))
sort order: ++
@@ -12206,8 +12197,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12215,7 +12205,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: [string]
Reducer 2
@@ -12411,7 +12401,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: i (type: int), CAST( s AS varchar(12)) (type: varchar(12))
sort order: ++
@@ -12432,8 +12422,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12441,7 +12430,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: [string]
Reducer 2
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out
index 2bf3b07..ff7cf6c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_rank.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: f (type: float), t (type: tinyint)
sort order: ++
@@ -94,8 +94,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -103,7 +102,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 4, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -334,7 +333,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), i (type: int), s (type: string)
sort order: ++-
@@ -353,8 +352,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -362,7 +360,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 7, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -558,7 +556,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: bo (type: boolean), b (type: bigint), s (type: string)
sort order: +++
@@ -577,8 +575,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -586,7 +583,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3, 6, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -782,7 +779,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: dec (type: decimal(4,2)), f (type: float)
sort order: ++
@@ -802,8 +799,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -811,7 +807,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 7, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1028,7 +1024,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -1062,8 +1058,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1071,7 +1066,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3, 8, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Map 4
@@ -1081,7 +1076,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -1114,8 +1109,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1123,7 +1117,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1329,7 +1323,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 160 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -1363,8 +1357,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1372,7 +1365,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3, 8, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Map 4
@@ -1382,7 +1375,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -1415,8 +1408,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1424,7 +1416,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1632,7 +1624,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 164 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -1666,8 +1658,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1675,7 +1666,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 3, 8, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Map 4
@@ -1685,7 +1676,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -1718,8 +1709,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1727,7 +1717,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
index e120391..55899ef 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_streaming.q.out
@@ -95,8 +95,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -242,8 +241,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -425,7 +423,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -450,8 +448,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -459,7 +456,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 4]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -853,8 +850,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
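Taken together, the golden-file updates above record one behavioural change: for columns such as dec:decimal(4,2), the plans now report DECIMAL_64 in both inputFormatFeatureSupport and featureSupportInUse, i.e. small-precision decimals are carried through the vectorized row batch as scaled 64-bit longs rather than as full decimal objects. Below is a minimal, self-contained sketch of that encoding idea; it uses only standard Java, and the class and method names are invented for illustration, not Hive APIs.
// Illustrative sketch only (not Hive's implementation): a decimal(4,2) value such as
// 12.34 can be carried as the scaled long 1234 plus its scale, which is the essence of
// the DECIMAL_64 path reported in the plan diffs above.
public final class Decimal64Sketch {
    private Decimal64Sketch() {}

    // Encode a decimal literal into a scaled 64-bit long for the given scale
    // (e.g. "12.34" with scale 2 -> 1234). Only valid for precision <= 18.
    static long encode(String text, int scale) {
        java.math.BigDecimal d = new java.math.BigDecimal(text).setScale(scale);
        return d.unscaledValue().longValueExact();
    }

    // Decode the scaled long back into a BigDecimal (1234 with scale 2 -> 12.34).
    static java.math.BigDecimal decode(long scaled, int scale) {
        return java.math.BigDecimal.valueOf(scaled, scale);
    }

    public static void main(String[] args) {
        long v = encode("12.34", 2);
        System.out.println(v);            // 1234
        System.out.println(decode(v, 2)); // 12.34
    }
}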
[35/67] [abbrv] hive git commit: HIVE-19876: Multiple fixes for Driver.isValidTxnListState (Jesus Camacho Rodriguez, reviewed by Eugene Koifman)
Posted by se...@apache.org.
HIVE-19876: Multiple fixes for Driver.isValidTxnListState (Jesus Camacho Rodriguez, reviewed by Eugene Koifman)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b1004830
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b1004830
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b1004830
Branch: refs/heads/master-txnstats
Commit: b1004830ec95a74112ce37308d251b0366030824
Parents: dd51259
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Thu Jun 14 20:52:33 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Sat Jun 16 16:46:48 2018 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/hive/ql/Context.java | 1 +
.../java/org/apache/hadoop/hive/ql/Driver.java | 30 +++++++++++---------
2 files changed, 17 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b1004830/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index e4e3d48..bb41e98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -356,6 +356,7 @@ public class Context {
this.executionIndex = ctx.executionIndex;
this.viewsTokenRewriteStreams = new HashMap<>();
this.rewrittenStatementContexts = new HashSet<>();
+ this.opContext = new CompilationOpContext();
}
public Map<String, Path> getFsScratchDirs() {
http://git-wip-us.apache.org/repos/asf/hive/blob/b1004830/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index abeb7fc..43a78ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -815,33 +815,35 @@ public class Driver implements IDriver {
// The lock may have multiple components, e.g., DbHiveLock, hence we need
// to check for each of them
for (LockComponent lckCmp : lock.getHiveLockComponents()) {
- if (lckCmp.getType() == LockType.EXCLUSIVE ||
- lckCmp.getType() == LockType.SHARED_WRITE) {
+ // We only consider tables for which we hold either an exclusive
+ // or a shared write lock
+ if ((lckCmp.getType() == LockType.EXCLUSIVE ||
+ lckCmp.getType() == LockType.SHARED_WRITE) &&
+ lckCmp.getTablename() != null) {
nonSharedLocks.add(
Warehouse.getQualifiedName(
lckCmp.getDbname(), lckCmp.getTablename()));
}
}
} else {
- // The lock has a single components, e.g., SimpleHiveLock or ZooKeeperHiveLock
- if (lock.getHiveLockMode() == HiveLockMode.EXCLUSIVE ||
- lock.getHiveLockMode() == HiveLockMode.SEMI_SHARED) {
- if (lock.getHiveLockObject().getPaths().length == 2) {
- // Pos 0 of lock paths array contains dbname, pos 1 contains tblname
- nonSharedLocks.add(
- Warehouse.getQualifiedName(
- lock.getHiveLockObject().getPaths()[0], lock.getHiveLockObject().getPaths()[1]));
- }
+ // The lock has a single component, e.g., SimpleHiveLock or ZooKeeperHiveLock.
+ // Pos 0 of lock paths array contains dbname, pos 1 contains tblname
+ if ((lock.getHiveLockMode() == HiveLockMode.EXCLUSIVE ||
+ lock.getHiveLockMode() == HiveLockMode.SEMI_SHARED) &&
+ lock.getHiveLockObject().getPaths().length == 2) {
+ nonSharedLocks.add(
+ Warehouse.getQualifiedName(
+ lock.getHiveLockObject().getPaths()[0], lock.getHiveLockObject().getPaths()[1]));
}
}
}
// 3) Get txn tables that are being written
- ValidTxnWriteIdList txnWriteIdList =
- new ValidTxnWriteIdList(conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
- if (txnWriteIdList == null) {
+ String txnWriteIdListStr = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
+ if (txnWriteIdListStr == null || txnWriteIdListStr.length() == 0) {
// Nothing to check
return true;
}
+ ValidTxnWriteIdList txnWriteIdList = new ValidTxnWriteIdList(txnWriteIdListStr);
List<Pair<String, Table>> writtenTables = getWrittenTableList(plan);
ValidTxnWriteIdList currentTxnWriteIds =
queryTxnMgr.getValidWriteIds(
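The Driver.java hunk above also removes an ineffective null check: the old code constructed the ValidTxnWriteIdList from the configuration value first and only then tested the freshly built object for null, a test that could never fire; the patched code reads the raw string, returns early when it is null or empty, and parses only afterwards. A small stand-alone sketch of that guard pattern follows; the Map-based conf, the key string, and the toy parsing are placeholders, not Hive's actual classes, configuration keys, or serialization format.
// Stand-alone sketch of the null/empty guard introduced above.
import java.util.HashMap;
import java.util.Map;

public class WriteIdGuardSketch {
    private static final String WRITE_IDS_KEY = "example.valid.tables.writeids"; // placeholder key

    static boolean isValidTxnListState(Map<String, String> conf) {
        String txnWriteIdListStr = conf.get(WRITE_IDS_KEY);
        if (txnWriteIdListStr == null || txnWriteIdListStr.isEmpty()) {
            return true; // nothing recorded for this query, so there is nothing to re-check
        }
        // Parse only after the guard; the pre-patch code parsed first and then ran a
        // null check on the already-constructed object, which could never succeed.
        String[] perTableEntries = txnWriteIdListStr.split("\\$"); // stand-in for the real parsing
        return perTableEntries.length > 0;                         // stand-in for the real comparison
    }

    public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        System.out.println(isValidTxnListState(conf)); // true: key unset, early return
        conf.put(WRITE_IDS_KEY, "txns$db.tbl:5:9223372036854775807::");
        System.out.println(isValidTxnListState(conf)); // true: entries present and parsed
    }
}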
[62/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index bc4d168..c2d6a56 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -215,6 +215,18 @@ const char* _kSchemaVersionStateNames[] = {
};
const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kSchemaVersionStateValues, _kSchemaVersionStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+int _kIsolationLevelComplianceValues[] = {
+ IsolationLevelCompliance::YES,
+ IsolationLevelCompliance::NO,
+ IsolationLevelCompliance::UNKNOWN
+};
+const char* _kIsolationLevelComplianceNames[] = {
+ "YES",
+ "NO",
+ "UNKNOWN"
+};
+const std::map<int, const char*> _IsolationLevelCompliance_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kIsolationLevelComplianceValues, _kIsolationLevelComplianceNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
int _kFunctionTypeValues[] = {
FunctionType::JAVA
};
@@ -6435,6 +6447,21 @@ void Table::__set_ownerType(const PrincipalType::type val) {
__isset.ownerType = true;
}
+void Table::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void Table::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void Table::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -6629,6 +6656,32 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 19:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 20:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 21:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast249;
+ xfer += iprot->readI32(ecast249);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast249;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -6677,10 +6730,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size()));
- std::vector<FieldSchema> ::const_iterator _iter249;
- for (_iter249 = this->partitionKeys.begin(); _iter249 != this->partitionKeys.end(); ++_iter249)
+ std::vector<FieldSchema> ::const_iterator _iter250;
+ for (_iter250 = this->partitionKeys.begin(); _iter250 != this->partitionKeys.end(); ++_iter250)
{
- xfer += (*_iter249).write(oprot);
+ xfer += (*_iter250).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -6689,11 +6742,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter250;
- for (_iter250 = this->parameters.begin(); _iter250 != this->parameters.end(); ++_iter250)
+ std::map<std::string, std::string> ::const_iterator _iter251;
+ for (_iter251 = this->parameters.begin(); _iter251 != this->parameters.end(); ++_iter251)
{
- xfer += oprot->writeString(_iter250->first);
- xfer += oprot->writeString(_iter250->second);
+ xfer += oprot->writeString(_iter251->first);
+ xfer += oprot->writeString(_iter251->second);
}
xfer += oprot->writeMapEnd();
}
@@ -6741,6 +6794,21 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeI32((int32_t)this->ownerType);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 19);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 20);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 21);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -6766,31 +6834,13 @@ void swap(Table &a, Table &b) {
swap(a.creationMetadata, b.creationMetadata);
swap(a.catName, b.catName);
swap(a.ownerType, b.ownerType);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
swap(a.__isset, b.__isset);
}
-Table::Table(const Table& other251) {
- tableName = other251.tableName;
- dbName = other251.dbName;
- owner = other251.owner;
- createTime = other251.createTime;
- lastAccessTime = other251.lastAccessTime;
- retention = other251.retention;
- sd = other251.sd;
- partitionKeys = other251.partitionKeys;
- parameters = other251.parameters;
- viewOriginalText = other251.viewOriginalText;
- viewExpandedText = other251.viewExpandedText;
- tableType = other251.tableType;
- privileges = other251.privileges;
- temporary = other251.temporary;
- rewriteEnabled = other251.rewriteEnabled;
- creationMetadata = other251.creationMetadata;
- catName = other251.catName;
- ownerType = other251.ownerType;
- __isset = other251.__isset;
-}
-Table& Table::operator=(const Table& other252) {
+Table::Table(const Table& other252) {
tableName = other252.tableName;
dbName = other252.dbName;
owner = other252.owner;
@@ -6809,7 +6859,34 @@ Table& Table::operator=(const Table& other252) {
creationMetadata = other252.creationMetadata;
catName = other252.catName;
ownerType = other252.ownerType;
+ txnId = other252.txnId;
+ validWriteIdList = other252.validWriteIdList;
+ isStatsCompliant = other252.isStatsCompliant;
__isset = other252.__isset;
+}
+Table& Table::operator=(const Table& other253) {
+ tableName = other253.tableName;
+ dbName = other253.dbName;
+ owner = other253.owner;
+ createTime = other253.createTime;
+ lastAccessTime = other253.lastAccessTime;
+ retention = other253.retention;
+ sd = other253.sd;
+ partitionKeys = other253.partitionKeys;
+ parameters = other253.parameters;
+ viewOriginalText = other253.viewOriginalText;
+ viewExpandedText = other253.viewExpandedText;
+ tableType = other253.tableType;
+ privileges = other253.privileges;
+ temporary = other253.temporary;
+ rewriteEnabled = other253.rewriteEnabled;
+ creationMetadata = other253.creationMetadata;
+ catName = other253.catName;
+ ownerType = other253.ownerType;
+ txnId = other253.txnId;
+ validWriteIdList = other253.validWriteIdList;
+ isStatsCompliant = other253.isStatsCompliant;
+ __isset = other253.__isset;
return *this;
}
void Table::printTo(std::ostream& out) const {
@@ -6833,6 +6910,9 @@ void Table::printTo(std::ostream& out) const {
out << ", " << "creationMetadata="; (__isset.creationMetadata ? (out << to_string(creationMetadata)) : (out << "<null>"));
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
out << ", " << "ownerType="; (__isset.ownerType ? (out << to_string(ownerType)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
@@ -6879,6 +6959,21 @@ void Partition::__set_catName(const std::string& val) {
__isset.catName = true;
}
+void Partition::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void Partition::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void Partition::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -6904,14 +6999,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->values.clear();
- uint32_t _size253;
- ::apache::thrift::protocol::TType _etype256;
- xfer += iprot->readListBegin(_etype256, _size253);
- this->values.resize(_size253);
- uint32_t _i257;
- for (_i257 = 0; _i257 < _size253; ++_i257)
+ uint32_t _size254;
+ ::apache::thrift::protocol::TType _etype257;
+ xfer += iprot->readListBegin(_etype257, _size254);
+ this->values.resize(_size254);
+ uint32_t _i258;
+ for (_i258 = 0; _i258 < _size254; ++_i258)
{
- xfer += iprot->readString(this->values[_i257]);
+ xfer += iprot->readString(this->values[_i258]);
}
xfer += iprot->readListEnd();
}
@@ -6964,17 +7059,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->parameters.clear();
- uint32_t _size258;
- ::apache::thrift::protocol::TType _ktype259;
- ::apache::thrift::protocol::TType _vtype260;
- xfer += iprot->readMapBegin(_ktype259, _vtype260, _size258);
- uint32_t _i262;
- for (_i262 = 0; _i262 < _size258; ++_i262)
+ uint32_t _size259;
+ ::apache::thrift::protocol::TType _ktype260;
+ ::apache::thrift::protocol::TType _vtype261;
+ xfer += iprot->readMapBegin(_ktype260, _vtype261, _size259);
+ uint32_t _i263;
+ for (_i263 = 0; _i263 < _size259; ++_i263)
{
- std::string _key263;
- xfer += iprot->readString(_key263);
- std::string& _val264 = this->parameters[_key263];
- xfer += iprot->readString(_val264);
+ std::string _key264;
+ xfer += iprot->readString(_key264);
+ std::string& _val265 = this->parameters[_key264];
+ xfer += iprot->readString(_val265);
}
xfer += iprot->readMapEnd();
}
@@ -6999,6 +7094,32 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 10:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 11:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 12:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast266;
+ xfer += iprot->readI32(ecast266);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast266;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -7019,10 +7140,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
- std::vector<std::string> ::const_iterator _iter265;
- for (_iter265 = this->values.begin(); _iter265 != this->values.end(); ++_iter265)
+ std::vector<std::string> ::const_iterator _iter267;
+ for (_iter267 = this->values.begin(); _iter267 != this->values.end(); ++_iter267)
{
- xfer += oprot->writeString((*_iter265));
+ xfer += oprot->writeString((*_iter267));
}
xfer += oprot->writeListEnd();
}
@@ -7051,11 +7172,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter266;
- for (_iter266 = this->parameters.begin(); _iter266 != this->parameters.end(); ++_iter266)
+ std::map<std::string, std::string> ::const_iterator _iter268;
+ for (_iter268 = this->parameters.begin(); _iter268 != this->parameters.end(); ++_iter268)
{
- xfer += oprot->writeString(_iter266->first);
- xfer += oprot->writeString(_iter266->second);
+ xfer += oprot->writeString(_iter268->first);
+ xfer += oprot->writeString(_iter268->second);
}
xfer += oprot->writeMapEnd();
}
@@ -7071,6 +7192,21 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeString(this->catName);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 10);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 11);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 12);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -7087,32 +7223,41 @@ void swap(Partition &a, Partition &b) {
swap(a.parameters, b.parameters);
swap(a.privileges, b.privileges);
swap(a.catName, b.catName);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
swap(a.__isset, b.__isset);
}
-Partition::Partition(const Partition& other267) {
- values = other267.values;
- dbName = other267.dbName;
- tableName = other267.tableName;
- createTime = other267.createTime;
- lastAccessTime = other267.lastAccessTime;
- sd = other267.sd;
- parameters = other267.parameters;
- privileges = other267.privileges;
- catName = other267.catName;
- __isset = other267.__isset;
-}
-Partition& Partition::operator=(const Partition& other268) {
- values = other268.values;
- dbName = other268.dbName;
- tableName = other268.tableName;
- createTime = other268.createTime;
- lastAccessTime = other268.lastAccessTime;
- sd = other268.sd;
- parameters = other268.parameters;
- privileges = other268.privileges;
- catName = other268.catName;
- __isset = other268.__isset;
+Partition::Partition(const Partition& other269) {
+ values = other269.values;
+ dbName = other269.dbName;
+ tableName = other269.tableName;
+ createTime = other269.createTime;
+ lastAccessTime = other269.lastAccessTime;
+ sd = other269.sd;
+ parameters = other269.parameters;
+ privileges = other269.privileges;
+ catName = other269.catName;
+ txnId = other269.txnId;
+ validWriteIdList = other269.validWriteIdList;
+ isStatsCompliant = other269.isStatsCompliant;
+ __isset = other269.__isset;
+}
+Partition& Partition::operator=(const Partition& other270) {
+ values = other270.values;
+ dbName = other270.dbName;
+ tableName = other270.tableName;
+ createTime = other270.createTime;
+ lastAccessTime = other270.lastAccessTime;
+ sd = other270.sd;
+ parameters = other270.parameters;
+ privileges = other270.privileges;
+ catName = other270.catName;
+ txnId = other270.txnId;
+ validWriteIdList = other270.validWriteIdList;
+ isStatsCompliant = other270.isStatsCompliant;
+ __isset = other270.__isset;
return *this;
}
void Partition::printTo(std::ostream& out) const {
@@ -7127,6 +7272,9 @@ void Partition::printTo(std::ostream& out) const {
out << ", " << "parameters=" << to_string(parameters);
out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "<null>"));
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
@@ -7185,14 +7333,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->values.clear();
- uint32_t _size269;
- ::apache::thrift::protocol::TType _etype272;
- xfer += iprot->readListBegin(_etype272, _size269);
- this->values.resize(_size269);
- uint32_t _i273;
- for (_i273 = 0; _i273 < _size269; ++_i273)
+ uint32_t _size271;
+ ::apache::thrift::protocol::TType _etype274;
+ xfer += iprot->readListBegin(_etype274, _size271);
+ this->values.resize(_size271);
+ uint32_t _i275;
+ for (_i275 = 0; _i275 < _size271; ++_i275)
{
- xfer += iprot->readString(this->values[_i273]);
+ xfer += iprot->readString(this->values[_i275]);
}
xfer += iprot->readListEnd();
}
@@ -7229,17 +7377,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->parameters.clear();
- uint32_t _size274;
- ::apache::thrift::protocol::TType _ktype275;
- ::apache::thrift::protocol::TType _vtype276;
- xfer += iprot->readMapBegin(_ktype275, _vtype276, _size274);
- uint32_t _i278;
- for (_i278 = 0; _i278 < _size274; ++_i278)
+ uint32_t _size276;
+ ::apache::thrift::protocol::TType _ktype277;
+ ::apache::thrift::protocol::TType _vtype278;
+ xfer += iprot->readMapBegin(_ktype277, _vtype278, _size276);
+ uint32_t _i280;
+ for (_i280 = 0; _i280 < _size276; ++_i280)
{
- std::string _key279;
- xfer += iprot->readString(_key279);
- std::string& _val280 = this->parameters[_key279];
- xfer += iprot->readString(_val280);
+ std::string _key281;
+ xfer += iprot->readString(_key281);
+ std::string& _val282 = this->parameters[_key281];
+ xfer += iprot->readString(_val282);
}
xfer += iprot->readMapEnd();
}
@@ -7276,10 +7424,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
- std::vector<std::string> ::const_iterator _iter281;
- for (_iter281 = this->values.begin(); _iter281 != this->values.end(); ++_iter281)
+ std::vector<std::string> ::const_iterator _iter283;
+ for (_iter283 = this->values.begin(); _iter283 != this->values.end(); ++_iter283)
{
- xfer += oprot->writeString((*_iter281));
+ xfer += oprot->writeString((*_iter283));
}
xfer += oprot->writeListEnd();
}
@@ -7300,11 +7448,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
- std::map<std::string, std::string> ::const_iterator _iter282;
- for (_iter282 = this->parameters.begin(); _iter282 != this->parameters.end(); ++_iter282)
+ std::map<std::string, std::string> ::const_iterator _iter284;
+ for (_iter284 = this->parameters.begin(); _iter284 != this->parameters.end(); ++_iter284)
{
- xfer += oprot->writeString(_iter282->first);
- xfer += oprot->writeString(_iter282->second);
+ xfer += oprot->writeString(_iter284->first);
+ xfer += oprot->writeString(_iter284->second);
}
xfer += oprot->writeMapEnd();
}
@@ -7331,23 +7479,23 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) {
swap(a.__isset, b.__isset);
}
-PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other283) {
- values = other283.values;
- createTime = other283.createTime;
- lastAccessTime = other283.lastAccessTime;
- relativePath = other283.relativePath;
- parameters = other283.parameters;
- privileges = other283.privileges;
- __isset = other283.__isset;
-}
-PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other284) {
- values = other284.values;
- createTime = other284.createTime;
- lastAccessTime = other284.lastAccessTime;
- relativePath = other284.relativePath;
- parameters = other284.parameters;
- privileges = other284.privileges;
- __isset = other284.__isset;
+PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other285) {
+ values = other285.values;
+ createTime = other285.createTime;
+ lastAccessTime = other285.lastAccessTime;
+ relativePath = other285.relativePath;
+ parameters = other285.parameters;
+ privileges = other285.privileges;
+ __isset = other285.__isset;
+}
+PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other286) {
+ values = other286.values;
+ createTime = other286.createTime;
+ lastAccessTime = other286.lastAccessTime;
+ relativePath = other286.relativePath;
+ parameters = other286.parameters;
+ privileges = other286.privileges;
+ __isset = other286.__isset;
return *this;
}
void PartitionWithoutSD::printTo(std::ostream& out) const {
@@ -7400,14 +7548,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size285;
- ::apache::thrift::protocol::TType _etype288;
- xfer += iprot->readListBegin(_etype288, _size285);
- this->partitions.resize(_size285);
- uint32_t _i289;
- for (_i289 = 0; _i289 < _size285; ++_i289)
+ uint32_t _size287;
+ ::apache::thrift::protocol::TType _etype290;
+ xfer += iprot->readListBegin(_etype290, _size287);
+ this->partitions.resize(_size287);
+ uint32_t _i291;
+ for (_i291 = 0; _i291 < _size287; ++_i291)
{
- xfer += this->partitions[_i289].read(iprot);
+ xfer += this->partitions[_i291].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -7444,10 +7592,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<PartitionWithoutSD> ::const_iterator _iter290;
- for (_iter290 = this->partitions.begin(); _iter290 != this->partitions.end(); ++_iter290)
+ std::vector<PartitionWithoutSD> ::const_iterator _iter292;
+ for (_iter292 = this->partitions.begin(); _iter292 != this->partitions.end(); ++_iter292)
{
- xfer += (*_iter290).write(oprot);
+ xfer += (*_iter292).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -7469,15 +7617,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) {
swap(a.__isset, b.__isset);
}
-PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other291) {
- partitions = other291.partitions;
- sd = other291.sd;
- __isset = other291.__isset;
+PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other293) {
+ partitions = other293.partitions;
+ sd = other293.sd;
+ __isset = other293.__isset;
}
-PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other292) {
- partitions = other292.partitions;
- sd = other292.sd;
- __isset = other292.__isset;
+PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other294) {
+ partitions = other294.partitions;
+ sd = other294.sd;
+ __isset = other294.__isset;
return *this;
}
void PartitionSpecWithSharedSD::printTo(std::ostream& out) const {
@@ -7522,14 +7670,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partitions.clear();
- uint32_t _size293;
- ::apache::thrift::protocol::TType _etype296;
- xfer += iprot->readListBegin(_etype296, _size293);
- this->partitions.resize(_size293);
- uint32_t _i297;
- for (_i297 = 0; _i297 < _size293; ++_i297)
+ uint32_t _size295;
+ ::apache::thrift::protocol::TType _etype298;
+ xfer += iprot->readListBegin(_etype298, _size295);
+ this->partitions.resize(_size295);
+ uint32_t _i299;
+ for (_i299 = 0; _i299 < _size295; ++_i299)
{
- xfer += this->partitions[_i297].read(iprot);
+ xfer += this->partitions[_i299].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -7558,10 +7706,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol
xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
- std::vector<Partition> ::const_iterator _iter298;
- for (_iter298 = this->partitions.begin(); _iter298 != this->partitions.end(); ++_iter298)
+ std::vector<Partition> ::const_iterator _iter300;
+ for (_iter300 = this->partitions.begin(); _iter300 != this->partitions.end(); ++_iter300)
{
- xfer += (*_iter298).write(oprot);
+ xfer += (*_iter300).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -7578,13 +7726,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) {
swap(a.__isset, b.__isset);
}
-PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other299) {
- partitions = other299.partitions;
- __isset = other299.__isset;
+PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other301) {
+ partitions = other301.partitions;
+ __isset = other301.__isset;
}
-PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other300) {
- partitions = other300.partitions;
- __isset = other300.__isset;
+PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other302) {
+ partitions = other302.partitions;
+ __isset = other302.__isset;
return *this;
}
void PartitionListComposingSpec::printTo(std::ostream& out) const {
@@ -7626,6 +7774,21 @@ void PartitionSpec::__set_catName(const std::string& val) {
__isset.catName = true;
}
+void PartitionSpec::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void PartitionSpec::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void PartitionSpec::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -7695,6 +7858,32 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 7:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 8:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 9:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast303;
+ xfer += iprot->readI32(ecast303);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast303;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -7739,6 +7928,21 @@ uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) cons
xfer += oprot->writeString(this->catName);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 7);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 8);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 9);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -7752,26 +7956,35 @@ void swap(PartitionSpec &a, PartitionSpec &b) {
swap(a.sharedSDPartitionSpec, b.sharedSDPartitionSpec);
swap(a.partitionList, b.partitionList);
swap(a.catName, b.catName);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
swap(a.__isset, b.__isset);
}
-PartitionSpec::PartitionSpec(const PartitionSpec& other301) {
- dbName = other301.dbName;
- tableName = other301.tableName;
- rootPath = other301.rootPath;
- sharedSDPartitionSpec = other301.sharedSDPartitionSpec;
- partitionList = other301.partitionList;
- catName = other301.catName;
- __isset = other301.__isset;
+PartitionSpec::PartitionSpec(const PartitionSpec& other304) {
+ dbName = other304.dbName;
+ tableName = other304.tableName;
+ rootPath = other304.rootPath;
+ sharedSDPartitionSpec = other304.sharedSDPartitionSpec;
+ partitionList = other304.partitionList;
+ catName = other304.catName;
+ txnId = other304.txnId;
+ validWriteIdList = other304.validWriteIdList;
+ isStatsCompliant = other304.isStatsCompliant;
+ __isset = other304.__isset;
}
-PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other302) {
- dbName = other302.dbName;
- tableName = other302.tableName;
- rootPath = other302.rootPath;
- sharedSDPartitionSpec = other302.sharedSDPartitionSpec;
- partitionList = other302.partitionList;
- catName = other302.catName;
- __isset = other302.__isset;
+PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other305) {
+ dbName = other305.dbName;
+ tableName = other305.tableName;
+ rootPath = other305.rootPath;
+ sharedSDPartitionSpec = other305.sharedSDPartitionSpec;
+ partitionList = other305.partitionList;
+ catName = other305.catName;
+ txnId = other305.txnId;
+ validWriteIdList = other305.validWriteIdList;
+ isStatsCompliant = other305.isStatsCompliant;
+ __isset = other305.__isset;
return *this;
}
void PartitionSpec::printTo(std::ostream& out) const {
@@ -7783,6 +7996,9 @@ void PartitionSpec::printTo(std::ostream& out) const {
out << ", " << "sharedSDPartitionSpec="; (__isset.sharedSDPartitionSpec ? (out << to_string(sharedSDPartitionSpec)) : (out << "<null>"));
out << ", " << "partitionList="; (__isset.partitionList ? (out << to_string(partitionList)) : (out << "<null>"));
out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
@@ -7918,19 +8134,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other303) {
- numTrues = other303.numTrues;
- numFalses = other303.numFalses;
- numNulls = other303.numNulls;
- bitVectors = other303.bitVectors;
- __isset = other303.__isset;
+BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other306) {
+ numTrues = other306.numTrues;
+ numFalses = other306.numFalses;
+ numNulls = other306.numNulls;
+ bitVectors = other306.bitVectors;
+ __isset = other306.__isset;
}
-BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other304) {
- numTrues = other304.numTrues;
- numFalses = other304.numFalses;
- numNulls = other304.numNulls;
- bitVectors = other304.bitVectors;
- __isset = other304.__isset;
+BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other307) {
+ numTrues = other307.numTrues;
+ numFalses = other307.numFalses;
+ numNulls = other307.numNulls;
+ bitVectors = other307.bitVectors;
+ __isset = other307.__isset;
return *this;
}
void BooleanColumnStatsData::printTo(std::ostream& out) const {
@@ -8093,21 +8309,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other305) {
- lowValue = other305.lowValue;
- highValue = other305.highValue;
- numNulls = other305.numNulls;
- numDVs = other305.numDVs;
- bitVectors = other305.bitVectors;
- __isset = other305.__isset;
+DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other308) {
+ lowValue = other308.lowValue;
+ highValue = other308.highValue;
+ numNulls = other308.numNulls;
+ numDVs = other308.numDVs;
+ bitVectors = other308.bitVectors;
+ __isset = other308.__isset;
}
-DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other306) {
- lowValue = other306.lowValue;
- highValue = other306.highValue;
- numNulls = other306.numNulls;
- numDVs = other306.numDVs;
- bitVectors = other306.bitVectors;
- __isset = other306.__isset;
+DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other309) {
+ lowValue = other309.lowValue;
+ highValue = other309.highValue;
+ numNulls = other309.numNulls;
+ numDVs = other309.numDVs;
+ bitVectors = other309.bitVectors;
+ __isset = other309.__isset;
return *this;
}
void DoubleColumnStatsData::printTo(std::ostream& out) const {
@@ -8271,21 +8487,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other307) {
- lowValue = other307.lowValue;
- highValue = other307.highValue;
- numNulls = other307.numNulls;
- numDVs = other307.numDVs;
- bitVectors = other307.bitVectors;
- __isset = other307.__isset;
+LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other310) {
+ lowValue = other310.lowValue;
+ highValue = other310.highValue;
+ numNulls = other310.numNulls;
+ numDVs = other310.numDVs;
+ bitVectors = other310.bitVectors;
+ __isset = other310.__isset;
}
-LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other308) {
- lowValue = other308.lowValue;
- highValue = other308.highValue;
- numNulls = other308.numNulls;
- numDVs = other308.numDVs;
- bitVectors = other308.bitVectors;
- __isset = other308.__isset;
+LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other311) {
+ lowValue = other311.lowValue;
+ highValue = other311.highValue;
+ numNulls = other311.numNulls;
+ numDVs = other311.numDVs;
+ bitVectors = other311.bitVectors;
+ __isset = other311.__isset;
return *this;
}
void LongColumnStatsData::printTo(std::ostream& out) const {
@@ -8451,21 +8667,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other309) {
- maxColLen = other309.maxColLen;
- avgColLen = other309.avgColLen;
- numNulls = other309.numNulls;
- numDVs = other309.numDVs;
- bitVectors = other309.bitVectors;
- __isset = other309.__isset;
+StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other312) {
+ maxColLen = other312.maxColLen;
+ avgColLen = other312.avgColLen;
+ numNulls = other312.numNulls;
+ numDVs = other312.numDVs;
+ bitVectors = other312.bitVectors;
+ __isset = other312.__isset;
}
-StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other310) {
- maxColLen = other310.maxColLen;
- avgColLen = other310.avgColLen;
- numNulls = other310.numNulls;
- numDVs = other310.numDVs;
- bitVectors = other310.bitVectors;
- __isset = other310.__isset;
+StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other313) {
+ maxColLen = other313.maxColLen;
+ avgColLen = other313.avgColLen;
+ numNulls = other313.numNulls;
+ numDVs = other313.numDVs;
+ bitVectors = other313.bitVectors;
+ __isset = other313.__isset;
return *this;
}
void StringColumnStatsData::printTo(std::ostream& out) const {
@@ -8611,19 +8827,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other311) {
- maxColLen = other311.maxColLen;
- avgColLen = other311.avgColLen;
- numNulls = other311.numNulls;
- bitVectors = other311.bitVectors;
- __isset = other311.__isset;
+BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other314) {
+ maxColLen = other314.maxColLen;
+ avgColLen = other314.avgColLen;
+ numNulls = other314.numNulls;
+ bitVectors = other314.bitVectors;
+ __isset = other314.__isset;
}
-BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other312) {
- maxColLen = other312.maxColLen;
- avgColLen = other312.avgColLen;
- numNulls = other312.numNulls;
- bitVectors = other312.bitVectors;
- __isset = other312.__isset;
+BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other315) {
+ maxColLen = other315.maxColLen;
+ avgColLen = other315.avgColLen;
+ numNulls = other315.numNulls;
+ bitVectors = other315.bitVectors;
+ __isset = other315.__isset;
return *this;
}
void BinaryColumnStatsData::printTo(std::ostream& out) const {
@@ -8728,13 +8944,13 @@ void swap(Decimal &a, Decimal &b) {
swap(a.unscaled, b.unscaled);
}
-Decimal::Decimal(const Decimal& other313) {
- scale = other313.scale;
- unscaled = other313.unscaled;
+Decimal::Decimal(const Decimal& other316) {
+ scale = other316.scale;
+ unscaled = other316.unscaled;
}
-Decimal& Decimal::operator=(const Decimal& other314) {
- scale = other314.scale;
- unscaled = other314.unscaled;
+Decimal& Decimal::operator=(const Decimal& other317) {
+ scale = other317.scale;
+ unscaled = other317.unscaled;
return *this;
}
void Decimal::printTo(std::ostream& out) const {
@@ -8895,21 +9111,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other315) {
- lowValue = other315.lowValue;
- highValue = other315.highValue;
- numNulls = other315.numNulls;
- numDVs = other315.numDVs;
- bitVectors = other315.bitVectors;
- __isset = other315.__isset;
+DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other318) {
+ lowValue = other318.lowValue;
+ highValue = other318.highValue;
+ numNulls = other318.numNulls;
+ numDVs = other318.numDVs;
+ bitVectors = other318.bitVectors;
+ __isset = other318.__isset;
}
-DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other316) {
- lowValue = other316.lowValue;
- highValue = other316.highValue;
- numNulls = other316.numNulls;
- numDVs = other316.numDVs;
- bitVectors = other316.bitVectors;
- __isset = other316.__isset;
+DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other319) {
+ lowValue = other319.lowValue;
+ highValue = other319.highValue;
+ numNulls = other319.numNulls;
+ numDVs = other319.numDVs;
+ bitVectors = other319.bitVectors;
+ __isset = other319.__isset;
return *this;
}
void DecimalColumnStatsData::printTo(std::ostream& out) const {
@@ -8995,11 +9211,11 @@ void swap(Date &a, Date &b) {
swap(a.daysSinceEpoch, b.daysSinceEpoch);
}
-Date::Date(const Date& other317) {
- daysSinceEpoch = other317.daysSinceEpoch;
+Date::Date(const Date& other320) {
+ daysSinceEpoch = other320.daysSinceEpoch;
}
-Date& Date::operator=(const Date& other318) {
- daysSinceEpoch = other318.daysSinceEpoch;
+Date& Date::operator=(const Date& other321) {
+ daysSinceEpoch = other321.daysSinceEpoch;
return *this;
}
void Date::printTo(std::ostream& out) const {
@@ -9159,21 +9375,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) {
swap(a.__isset, b.__isset);
}
-DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other319) {
- lowValue = other319.lowValue;
- highValue = other319.highValue;
- numNulls = other319.numNulls;
- numDVs = other319.numDVs;
- bitVectors = other319.bitVectors;
- __isset = other319.__isset;
+DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other322) {
+ lowValue = other322.lowValue;
+ highValue = other322.highValue;
+ numNulls = other322.numNulls;
+ numDVs = other322.numDVs;
+ bitVectors = other322.bitVectors;
+ __isset = other322.__isset;
}
-DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other320) {
- lowValue = other320.lowValue;
- highValue = other320.highValue;
- numNulls = other320.numNulls;
- numDVs = other320.numDVs;
- bitVectors = other320.bitVectors;
- __isset = other320.__isset;
+DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other323) {
+ lowValue = other323.lowValue;
+ highValue = other323.highValue;
+ numNulls = other323.numNulls;
+ numDVs = other323.numDVs;
+ bitVectors = other323.bitVectors;
+ __isset = other323.__isset;
return *this;
}
void DateColumnStatsData::printTo(std::ostream& out) const {
@@ -9359,25 +9575,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) {
swap(a.__isset, b.__isset);
}
-ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other321) {
- booleanStats = other321.booleanStats;
- longStats = other321.longStats;
- doubleStats = other321.doubleStats;
- stringStats = other321.stringStats;
- binaryStats = other321.binaryStats;
- decimalStats = other321.decimalStats;
- dateStats = other321.dateStats;
- __isset = other321.__isset;
-}
-ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other322) {
- booleanStats = other322.booleanStats;
- longStats = other322.longStats;
- doubleStats = other322.doubleStats;
- stringStats = other322.stringStats;
- binaryStats = other322.binaryStats;
- decimalStats = other322.decimalStats;
- dateStats = other322.dateStats;
- __isset = other322.__isset;
+ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other324) {
+ booleanStats = other324.booleanStats;
+ longStats = other324.longStats;
+ doubleStats = other324.doubleStats;
+ stringStats = other324.stringStats;
+ binaryStats = other324.binaryStats;
+ decimalStats = other324.decimalStats;
+ dateStats = other324.dateStats;
+ __isset = other324.__isset;
+}
+ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other325) {
+ booleanStats = other325.booleanStats;
+ longStats = other325.longStats;
+ doubleStats = other325.doubleStats;
+ stringStats = other325.stringStats;
+ binaryStats = other325.binaryStats;
+ decimalStats = other325.decimalStats;
+ dateStats = other325.dateStats;
+ __isset = other325.__isset;
return *this;
}
void ColumnStatisticsData::printTo(std::ostream& out) const {
@@ -9505,15 +9721,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) {
swap(a.statsData, b.statsData);
}
-ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other323) {
- colName = other323.colName;
- colType = other323.colType;
- statsData = other323.statsData;
+ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other326) {
+ colName = other326.colName;
+ colType = other326.colType;
+ statsData = other326.statsData;
}
-ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other324) {
- colName = other324.colName;
- colType = other324.colType;
- statsData = other324.statsData;
+ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other327) {
+ colName = other327.colName;
+ colType = other327.colType;
+ statsData = other327.statsData;
return *this;
}
void ColumnStatisticsObj::printTo(std::ostream& out) const {
@@ -9695,23 +9911,23 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) {
swap(a.__isset, b.__isset);
}
-ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other325) {
- isTblLevel = other325.isTblLevel;
- dbName = other325.dbName;
- tableName = other325.tableName;
- partName = other325.partName;
- lastAnalyzed = other325.lastAnalyzed;
- catName = other325.catName;
- __isset = other325.__isset;
-}
-ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other326) {
- isTblLevel = other326.isTblLevel;
- dbName = other326.dbName;
- tableName = other326.tableName;
- partName = other326.partName;
- lastAnalyzed = other326.lastAnalyzed;
- catName = other326.catName;
- __isset = other326.__isset;
+ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other328) {
+ isTblLevel = other328.isTblLevel;
+ dbName = other328.dbName;
+ tableName = other328.tableName;
+ partName = other328.partName;
+ lastAnalyzed = other328.lastAnalyzed;
+ catName = other328.catName;
+ __isset = other328.__isset;
+}
+ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other329) {
+ isTblLevel = other329.isTblLevel;
+ dbName = other329.dbName;
+ tableName = other329.tableName;
+ partName = other329.partName;
+ lastAnalyzed = other329.lastAnalyzed;
+ catName = other329.catName;
+ __isset = other329.__isset;
return *this;
}
void ColumnStatisticsDesc::printTo(std::ostream& out) const {
@@ -9739,6 +9955,21 @@ void ColumnStatistics::__set_statsObj(const std::vector<ColumnStatisticsObj> & v
this->statsObj = val;
}
+void ColumnStatistics::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void ColumnStatistics::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
+void ColumnStatistics::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -9774,14 +10005,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->statsObj.clear();
- uint32_t _size327;
- ::apache::thrift::protocol::TType _etype330;
- xfer += iprot->readListBegin(_etype330, _size327);
- this->statsObj.resize(_size327);
- uint32_t _i331;
- for (_i331 = 0; _i331 < _size327; ++_i331)
+ uint32_t _size330;
+ ::apache::thrift::protocol::TType _etype333;
+ xfer += iprot->readListBegin(_etype333, _size330);
+ this->statsObj.resize(_size330);
+ uint32_t _i334;
+ for (_i334 = 0; _i334 < _size330; ++_i334)
{
- xfer += this->statsObj[_i331].read(iprot);
+ xfer += this->statsObj[_i334].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9790,6 +10021,32 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast335;
+ xfer += iprot->readI32(ecast335);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast335;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -9818,15 +10075,30 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c
xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->statsObj.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter332;
- for (_iter332 = this->statsObj.begin(); _iter332 != this->statsObj.end(); ++_iter332)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter336;
+ for (_iter336 = this->statsObj.begin(); _iter336 != this->statsObj.end(); ++_iter336)
{
- xfer += (*_iter332).write(oprot);
+ xfer += (*_iter336).write(oprot);
}
xfer += oprot->writeListEnd();
}
xfer += oprot->writeFieldEnd();
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 5);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -9836,15 +10108,27 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) {
using ::std::swap;
swap(a.statsDesc, b.statsDesc);
swap(a.statsObj, b.statsObj);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
+ swap(a.__isset, b.__isset);
}
-ColumnStatistics::ColumnStatistics(const ColumnStatistics& other333) {
- statsDesc = other333.statsDesc;
- statsObj = other333.statsObj;
-}
-ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other334) {
- statsDesc = other334.statsDesc;
- statsObj = other334.statsObj;
+ColumnStatistics::ColumnStatistics(const ColumnStatistics& other337) {
+ statsDesc = other337.statsDesc;
+ statsObj = other337.statsObj;
+ txnId = other337.txnId;
+ validWriteIdList = other337.validWriteIdList;
+ isStatsCompliant = other337.isStatsCompliant;
+ __isset = other337.__isset;
+}
+ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other338) {
+ statsDesc = other338.statsDesc;
+ statsObj = other338.statsObj;
+ txnId = other338.txnId;
+ validWriteIdList = other338.validWriteIdList;
+ isStatsCompliant = other338.isStatsCompliant;
+ __isset = other338.__isset;
return *this;
}
void ColumnStatistics::printTo(std::ostream& out) const {
@@ -9852,6 +10136,9 @@ void ColumnStatistics::printTo(std::ostream& out) const {
out << "ColumnStatistics(";
out << "statsDesc=" << to_string(statsDesc);
out << ", " << "statsObj=" << to_string(statsObj);
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
@@ -9868,6 +10155,11 @@ void AggrStats::__set_partsFound(const int64_t val) {
this->partsFound = val;
}
+void AggrStats::__set_isStatsCompliant(const IsolationLevelCompliance::type val) {
+ this->isStatsCompliant = val;
+__isset.isStatsCompliant = true;
+}
+
uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -9895,14 +10187,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colStats.clear();
- uint32_t _size335;
- ::apache::thrift::protocol::TType _etype338;
- xfer += iprot->readListBegin(_etype338, _size335);
- this->colStats.resize(_size335);
- uint32_t _i339;
- for (_i339 = 0; _i339 < _size335; ++_i339)
+ uint32_t _size339;
+ ::apache::thrift::protocol::TType _etype342;
+ xfer += iprot->readListBegin(_etype342, _size339);
+ this->colStats.resize(_size339);
+ uint32_t _i343;
+ for (_i343 = 0; _i343 < _size339; ++_i343)
{
- xfer += this->colStats[_i339].read(iprot);
+ xfer += this->colStats[_i343].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9919,6 +10211,16 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
xfer += iprot->skip(ftype);
}
break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast344;
+ xfer += iprot->readI32(ecast344);
+ this->isStatsCompliant = (IsolationLevelCompliance::type)ecast344;
+ this->__isset.isStatsCompliant = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -9943,10 +10245,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
- std::vector<ColumnStatisticsObj> ::const_iterator _iter340;
- for (_iter340 = this->colStats.begin(); _iter340 != this->colStats.end(); ++_iter340)
+ std::vector<ColumnStatisticsObj> ::const_iterator _iter345;
+ for (_iter345 = this->colStats.begin(); _iter345 != this->colStats.end(); ++_iter345)
{
- xfer += (*_iter340).write(oprot);
+ xfer += (*_iter345).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9956,6 +10258,11 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeI64(this->partsFound);
xfer += oprot->writeFieldEnd();
+ if (this->__isset.isStatsCompliant) {
+ xfer += oprot->writeFieldBegin("isStatsCompliant", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32((int32_t)this->isStatsCompliant);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -9965,15 +10272,21 @@ void swap(AggrStats &a, AggrStats &b) {
using ::std::swap;
swap(a.colStats, b.colStats);
swap(a.partsFound, b.partsFound);
+ swap(a.isStatsCompliant, b.isStatsCompliant);
+ swap(a.__isset, b.__isset);
}
-AggrStats::AggrStats(const AggrStats& other341) {
- colStats = other341.colStats;
- partsFound = other341.partsFound;
+AggrStats::AggrStats(const AggrStats& other346) {
+ colStats = other346.colStats;
+ partsFound = other346.partsFound;
+ isStatsCompliant = other346.isStatsCompliant;
+ __isset = other346.__isset;
}
-AggrStats& AggrStats::operator=(const AggrStats& other342) {
- colStats = other342.colStats;
- partsFound = other342.partsFound;
+AggrStats& AggrStats::operator=(const AggrStats& other347) {
+ colStats = other347.colStats;
+ partsFound = other347.partsFound;
+ isStatsCompliant = other347.isStatsCompliant;
+ __isset = other347.__isset;
return *this;
}
void AggrStats::printTo(std::ostream& out) const {
@@ -9981,6 +10294,7 @@ void AggrStats::printTo(std::ostream& out) const {
out << "AggrStats(";
out << "colStats=" << to_string(colStats);
out << ", " << "partsFound=" << to_string(partsFound);
+ out << ", " << "isStatsCompliant="; (__isset.isStatsCompliant ? (out << to_string(isStatsCompliant)) : (out << "<null>"));
out << ")";
}
@@ -9998,6 +10312,16 @@ void SetPartitionsStatsRequest::__set_needMerge(const bool val) {
__isset.needMerge = true;
}
+void SetPartitionsStatsRequest::__set_txnId(const int64_t val) {
+ this->txnId = val;
+__isset.txnId = true;
+}
+
+void SetPartitionsStatsRequest::__set_validWriteIdList(const std::string& val) {
+ this->validWriteIdList = val;
+__isset.validWriteIdList = true;
+}
+
uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -10024,14 +10348,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->colStats.clear();
- uint32_t _size343;
- ::apache::thrift::protocol::TType _etype346;
- xfer += iprot->readListBegin(_etype346, _size343);
- this->colStats.resize(_size343);
- uint32_t _i347;
- for (_i347 = 0; _i347 < _size343; ++_i347)
+ uint32_t _size348;
+ ::apache::thrift::protocol::TType _etype351;
+ xfer += iprot->readListBegin(_etype351, _size348);
+ this->colStats.resize(_size348);
+ uint32_t _i352;
+ for (_i352 = 0; _i352 < _size348; ++_i352)
{
- xfer += this->colStats[_i347].read(iprot);
+ xfer += this->colStats[_i352].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10048,6 +10372,22 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
xfer += iprot->skip(ftype);
}
break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->txnId);
+ this->__isset.txnId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validWriteIdList);
+ this->__isset.validWriteIdList = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
default:
xfer += iprot->skip(ftype);
break;
@@ -10070,10 +10410,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
- std::vector<ColumnStatistics> ::const_iterator _iter348;
- for (_iter348 = this->colStats.begin(); _iter348 != this->colStats.end(); ++_iter348)
+ std::vector<ColumnStatistics> ::const_iterator _iter353;
+ for (_iter353 = this->colStats.begin(); _iter353 != this->colStats.end(); ++_iter353)
{
- xfer += (*_iter348).write(oprot);
+ xfer += (*_iter353).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10084,6 +10424,16 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeBool(this->needMerge);
xfer += oprot->writeFieldEnd();
}
+ if (this->__isset.txnId) {
+ xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 3);
+ xfer += oprot->writeI64(this->txnId);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.validWriteIdList) {
+ xfer += oprot->writeFieldBegin("validWriteIdList", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->validWriteIdList);
+ xfer += oprot->writeFieldEnd();
+ }
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
@@ -10093,18 +10443,24 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) {
using ::std::swap;
swap(a.colStats, b.colStats);
swap(a.needMerge, b.needMerge);
+ swap(a.txnId, b.txnId);
+ swap(a.validWriteIdList, b.validWriteIdList);
swap(a.__isset, b.__isset);
}
-SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other349) {
- colStats = other349.colStats;
- needMerge = other349.needMerge;
- __isset = other349.__isset;
+SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other354) {
+ colStats = other354.colStats;
+ needMerge = other354.needMerge;
+ txnId = other354.txnId;
+ validWriteIdList = other354.validWriteIdList;
+ __isset = other354.__isset;
}
-SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other350) {
- colStats = other350.colStats;
- needMerge = other350.needMerge;
- __isset = other350.__isset;
+SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other355) {
+ colStats = other355.colStats;
+ needMerge = other355.needMerge;
+ txnId = other355.txnId;
+ validWriteIdList = other355.validWriteIdList;
+ __isset = other355.__isset;
return *this;
}
void SetPartitionsStatsRequest::printTo(std::ostream& out) const {
@@ -10112,6 +10468,8 @@ void SetPartitionsStatsRequest::printTo(std::ostream& out) const {
out << "SetPartitionsStatsRequest(";
out << "colStats=" << to_string(colStats);
out << ", " << "needMerge="; (__isset.needMerge ? (out << to_string(needMerge)) : (out << "<null>"));
+ out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out << "<null>"));
+ out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "<null>"));
out << ")";
}
@@ -10153,14 +10511,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->fieldSchemas.clear();
- uint32_t _size351;
- ::apache::thrift::protocol::TType _etype354;
- xfer += iprot->readListBegin(_etype354, _size351);
- this->fieldSchemas.resize(_size351);
- uint32_t _i355;
- for (_i355 = 0; _i355 < _size351; ++_i355)
+ uint32_t _size356;
+ ::apache::thrift::protocol::TType _etype359;
+ xfer += iprot->readListBegin(_etype359, _size356);
+ this->fieldSchemas.resize(_size356);
+ uint32_t _i360;
+ for (_i360 = 0; _i360 < _size356; ++_i360)
{
- xfer += this->fieldSchemas[_i355].read(iprot);
+ xfer += this->fieldSchemas[_i360].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10173,17 +10531,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->properties.clear();
- uint32_t _size356;
- ::apache::thrift::protocol::TType _ktype357;
- ::apache::thrift::protocol::TType _vtype358;
- xfer += iprot->readMapBegin(_ktype357, _vtype358, _size356);
- uint32_t _i360;
- for (_i360 = 0; _i360 < _size356; ++_i360)
+ uint32_t _size361;
+ ::apache::thrift::protocol::TType _ktype362;
+ ::apache::thrift::protocol::TType _vtype363;
+ xfer += iprot->readMapBegin(_ktype362, _vtype363, _size361);
+ uint32_t _i365;
+ for (_i365 = 0; _i365 < _size361; ++_i365)
{
- std::string _key361;
- xfer += iprot->readString(_key361);
- std::string& _val362 = this->properties[_key361];
- xfer += iprot->readString(_val362);
+ std::string _key366;
+ xfer += iprot->readString(_key366);
+ std::string& _val367 = this->properties[_key366];
+ xfer += iprot->readString(_val367);
}
xfer += iprot->readMapEnd();
}
@@ -10212,10 +10570,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fieldSchemas.size()));
- std::vector<FieldSchema> ::const_iterator _iter363;
- for (_iter363 = this->fieldSchemas.begin(); _iter363 != this->fieldSchemas.end(); ++_iter363)
+ std::vector<FieldSchema> ::const_iterator _iter368;
+ for (_iter368 = this->fieldSchemas.begin(); _iter368 != this->fieldSchemas.end(); ++_iter368)
{
- xfer += (*_iter363).write(oprot);
+ xfer += (*_iter368).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10224,11 +10582,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
- std::map<std::string, std::string> ::const_iterator _iter364;
- for (_iter364 = this->properties.begin(); _iter364 != this->properties.end(); ++_iter364)
+ std::map<std::string, std::string> ::const_iterator _iter369;
+ for (_iter369 = this->properties.begin(); _iter369 != this->properties.end(); ++_iter369)
{
- xfer += oprot->writeString(_iter364->first);
- xfer += oprot->writeString(_iter364->second);
+ xfer += oprot->writeString(_iter369->first);
+ xfer += oprot->writeString(_iter369->second);
}
xfer += oprot->writeMapEnd();
}
@@ -10246,15 +10604,15 @@ void swap(Schema &a, Schema &b) {
swap(a.__isset, b.__isset);
}
-Schema::Schema(const Schema& other365) {
- fieldSchemas = other365.fieldSchemas;
- properties = other365.properties;
- __isset = other365.__isset;
+Schema::Schema(const Schema& other370) {
+ fieldSchemas = other370.fieldSchemas;
+ properties = other370.properties;
+ __isset = other370.__isset;
}
-Schema& Schema::operator=(const Schema& other366) {
- fieldSchemas = other366.fieldSchemas;
- properties = other366.properties;
- __isset = other366.__isset;
+Schema& Schema::operator=(const Schema& other371) {
+ fieldSchemas = other371.fieldSchemas;
+ properties = other371.properties;
+ __isset = other371.__isset;
return *this;
}
void Schema::printTo(std::ostream& out) const {
@@ -10299,17 +10657,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->properties.clear();
- uint32_t _size367;
- ::apache::thrift::protocol::TType _ktype368;
- ::apache::thrift::protocol::TType _vtype369;
- xfer += iprot->readMapBegin(_ktype368, _vtype369, _size367);
- uint32_t _i371;
- for (_i371 = 0; _i371 < _size367; ++_i371)
+ uint32_t _size372;
+ ::apache::thrift::protocol::TType _ktype373;
+ ::apache::thrift::protocol::TType _vtype374;
+ xfer += iprot->readMapBegin(_ktype373, _vtype374, _size372);
+ uint32_t _i376;
+ for (_i376 = 0; _i376 < _size372; ++_i376)
{
- std::string _key372;
- xfer += iprot->readString(_key372);
- std::string& _val373 = this->properties[_key372];
- xfer += iprot->readString(_val373);
+ std::string _key377;
+ xfer += iprot->readString(_key377);
+ std::string& _val378 = this->properties[_key377];
+ xfer += iprot->readString(_val378);
}
xfer += iprot->readMapEnd();
}
@@ -10338,11 +10696,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot)
xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
- std::map<std::string, std::string> ::const_iterator _iter374;
- for (_iter374 = this->properties.begin(); _iter374 != this->properties.end(); ++_iter374)
+ std::map<std::string, std::string> ::const_iterator _iter379;
+ for (_iter379 = this->properties.begin(); _iter379 != this->properties.end(); ++_iter379)
{
- xfer += oprot->writeString(_iter374->first);
- xfer += oprot->writeString(_iter374->second);
+ xfer += oprot->writeString(_iter379->first);
+ xfer += oprot->writeString(_iter379->second);
}
xfer += oprot->writeMapEnd();
}
@@ -10359,13 +10717,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) {
swap(a.__isset, b.__isset);
}
-EnvironmentContext::EnvironmentContext(const EnvironmentContext& other375) {
- properties = other375.properties;
- __isset = other375.__isset;
+EnvironmentContext::EnvironmentContext(const EnvironmentContext& other380) {
+ properties = other380.properties;
+ __isset = other380.__isset;
}
-EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other376) {
- properties = other376.properties;
- __isset = other376.__isset;
+EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other381) {
+ properties = other381.properties;
+ __isset = other381.__isset;
return *this;
}
void EnvironmentContext::printTo(std::ostream& out) const {
@@ -10487,17 +10845,17 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) {
swap(a.__isset, b.__isset);
}
-PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other377) {
- db_name = other377.db_name;
- tbl_name = other377.tbl_name;
- catName = other377.catName;
- __isset = other377.__isset;
+PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other382) {
+ db_name = other382.db_name;
+ tbl_name = other382.tbl_name;
+ catName = other382.catName;
+ __isset = other382.__isset;
}
-PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other378) {
- db_name = other378.db_name;
- tbl_name = other378.tbl_name;
- catName = other378.catName;
- __isset = other378.__isset;
+PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other383) {
+ db_name = other383.db_name;
+ tbl_name = other383.tbl_name;
+ catName = other383.catName;
+ __isset = other383.__isset;
return *this;
}
void PrimaryKeysRequest::printTo(std::ostream& out) const {
@@ -10544,14 +10902,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size379;
- ::apache::thrift::protocol::TType _etype382;
- xfer += iprot->readListBegin(_etype382, _size379);
- this->primaryKeys.resize(_size379);
- uint32_t _i383;
- for (_i383 = 0; _i383 < _size379; ++_i383)
+ uint32_t _size384;
+ ::apache::thrift::protocol::TType _etype387;
+ xfer += iprot->readListBegin(_etype387, _size384);
+ this->primaryKeys.resize(_size384);
+ uint32_t _i388;
+ for (_i388 = 0; _i388 < _size384; ++_i388)
{
- xfer += this->primaryKeys[_i383].read(iprot);
+ xfer += this->primaryKeys[_i388].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10582,10 +10940,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter384;
- for (_iter384 = this->primaryKeys.begin(); _iter384 != this->primaryKeys.end(); ++_iter384)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter389;
+ for (_iter389 = this->primaryKeys.begin(); _iter389 != this->primaryKeys.end(); ++_iter389)
{
- xfer += (*_iter384).write(oprot);
+ xfer += (*_iter389).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10601,11 +10959,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) {
swap(a.primaryKeys, b.primaryKeys);
}
-PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other385) {
- primaryKeys = other385.primaryKeys;
+PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other390) {
+ primaryKeys = other390.primaryKeys;
}
-PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other386) {
- primaryKeys = other386.primaryKeys;
+PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other391) {
+ primaryKeys = other391.primaryKeys;
return *this;
}
void PrimaryKeysResponse::printTo(std::ostream& out) const {
@@ -10755,21 +11113,21 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) {
swap(a.__isset, b.__isset);
}
-ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other387) {
- parent_db_name = other387.parent_db_name;
- parent_tbl_name = other387.parent_tbl_name;
- foreign_db_name = other387.foreign_db_name;
- foreign_tbl_name = other387.foreign_tbl_name;
- catName = other387.catName;
- __isset = other387.__isset;
-}
-ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other388) {
- parent_db_name = other388.parent_db_name;
- parent_tbl_name = other388.parent_tbl_name;
- foreign_db_name = other388.foreign_db_name;
- foreign_tbl_name = other388.foreign_tbl_name;
- catName = other388.catName;
- __isset = other388.__isset;
+ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other392) {
+ parent_db_name = other392.parent_db_name;
+ parent_tbl_name = other392.parent_tbl_name;
+ foreign_db_name = other392.foreign_db_name;
+ foreign_tbl_name = other392.foreign_tbl_name;
+ catName = other392.catName;
+ __isset = other392.__isset;
+}
+ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other393) {
+ parent_db_name = other393.parent_db_name;
+ parent_tbl_name = other393.parent_tbl_name;
+ foreign_db_name = other393.foreign_db_name;
+ foreign_tbl_name = other393.foreign_tbl_name;
+ catName = other393.catName;
+ __isset = other393.__isset;
return *this;
}
void ForeignKeysRequest::printTo(std::ostream& out) const {
@@ -10818,14 +11176,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size389;
- ::apache::thrift::protocol::TType _etype392;
- xfer += iprot->readListBegin(_etype392, _size389);
- this->foreignKeys.resize(_size389);
- uint32_t _i393;
- for (_i393 = 0; _i393 < _size389; ++_i393)
+ uint32_t _size394;
+ ::apache::thrift::protocol::TType _etype397;
+ xfer += iprot->readListBegin(_etype397, _size394);
+ this->foreignKeys.resize(_size394);
+ uint32_t _i398;
+ for (_i398 = 0; _i398 < _size394; ++_i398)
{
- xfer += this->foreignKeys[_i393].read(iprot);
+ xfer += this->foreignKeys[_i398].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10856,10 +11214,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter394;
- for (_iter394 = this->foreignKeys.begin(); _iter394 != this->foreignKeys.end(); ++_iter394)
+ std::vector<SQLForeignKey> ::const_iterator _iter399;
+ for (_iter399 = this->foreignKeys.begin(); _iter399 != this->foreignKeys.end(); ++_iter399)
{
- xfer += (*_iter394).write(oprot);
+ xfer += (*_iter399).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10875,11 +11233,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) {
swap(a.foreignKeys, b.foreignKeys);
}
-ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other395) {
- foreignKeys = other395.foreignKeys;
+ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other400) {
+ foreignKeys = other400.foreignKeys;
}
-ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other396) {
- foreignKeys = other396.foreignKeys;
+ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other401) {
+ foreignKeys = other401.foreignKeys;
return *this;
}
void ForeignKeysResponse::printTo(std::ostream& out) const {
@@ -11001,15 +11359,15 @@ void swap(UniqueConstraintsRequest &a, UniqueConstraintsRequest &b) {
swap(a.tbl_name, b.tbl_name);
}
-UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other397) {
- catName = other397.catName;
- db_name = other397.db_name;
- tbl_name = other397.tbl_name;
+UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other402) {
+ catName = other402.catName;
+ db_name = other402.db_name;
+ tbl_name = other402.tbl_name;
}
-UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other398) {
- catName = other398.catName;
- db_name = other398.db_name;
- tbl_name = other398.tbl_name;
+UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other403) {
+ catName = other403.catName;
+ db_name = other403.db_name;
+ tbl_name = other403.tbl_name;
return *this;
}
void UniqueConstraintsRequest::printTo(std::ostream& out) const {
@@ -11056,14 +11414,14 @@ uint32_t UniqueConstraintsResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraints.clear();
- uint32_t _size399;
- ::apache::thrift::protocol::TType _etype402;
- xfer += iprot->readListBegin(_etype402, _size399);
- this->uniqueConstraints.resize(_size399);
- uint32_t _i403;
- for (_i403 = 0; _i403 < _size399; ++_i403)
+ uint32_t _size404;
+ ::apache::thrift::protocol::TType _etype407;
+ xfer += iprot->readListBegin(_etype407, _size404);
+ this->uniqueConstraints.resize(_size404);
+ uint32_t _i408;
+ for (_i408 = 0; _i408 < _size404; ++_i408)
{
- xfer += this->uniqueConstraints[_i403].read(iprot);
+ xfer += this->uniqueConstraints[_i408].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -11094,10 +11452,10 @@ uint32_t UniqueConstraintsResponse::write(::apache::thrift::protocol::TProtocol*
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter404;
- for (_iter404 = this->uniqueConstraints.begin(); _iter404 != this->uniqueConstraints.end(); ++_iter404)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter409;
+ for (_iter409 = this->uniqueConstraints.begin(); _iter409 != this->uniqueConstraints.end(); ++_iter409)
{
- xfer += (*_iter404).write(oprot);
+ xfer += (*_iter409).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11113,11 +11471,11 @@ void swap(UniqueConstraintsResponse &a, UniqueConstraintsResponse &b) {
swap(a.uniqueConstraints, b.uniqueConstraints);
}
-UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other405) {
- uniqueConstraints = other405.uniqueConstraints;
+UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other410) {
+ uniqueConstraints = other410.uniqueConstraints;
}
-UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other406) {
- uniqueConstraints = other406.uniqueConstraints;
+UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other411) {
+ uniqueConstraints = other411.uniqueConstraints;
return *this;
}
void UniqueConstraintsResponse::printTo(std::ostream& out) const {
@@ -11239,15 +11597,15 @@ void swap(NotNullConstraintsRequest &a, NotNullConstraintsRequest &b) {
swap(a.tbl_name, b.tbl_name);
}
-NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other407) {
- catName = other407.catName;
- db_name = other407.db_name;
- tbl_name = other407.tbl_name;
+NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other412) {
+ catName = other412.catName;
+ db_name = other412.db_name;
+ tbl_name = other412.tbl_name;
}
-NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other408) {
- catName = other408.catName;
- db_name = other408.db_name;
- tbl_name = other408.tbl_name;
+NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other413) {
+ catName = other413.catName;
+ db_name = other413.db_name;
+ tbl_name = other413.tbl_name;
return *this;
}
void NotNullConstraintsRequest::printTo(std::ostream& out) const {
@@ -11294,14 +11652,14 @@ uint32_t NotNullConstraintsResponse::read(::apache::thrift::protocol::TProtocol*
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraints.clear();
- uint32_t _size409;
- ::apache::thrift::protocol::TType _etype412;
- xfer += iprot->readListBegin(_etype412, _size409);
- this->notNullConstraints.resize(_size409);
- uint32_t _i413;
- for (_i413 = 0; _i413 < _size409; ++_i413)
+ uint32_t _size414;
+ ::apache::thrift::protocol::TType _etype417;
+ xfer += iprot->readListBegin(_etype417, _size414);
+ this->notNullConstraints.resize(_size414);
+ uint32_t _i418;
+ for (_i418 = 0; _i418 < _size414; ++_i418)
{
- xfer += this->notNullConstraints[_i413].read(iprot);
+ xfer += this->notNullConstraints[_i418].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -11332,10 +11690,10 @@ uint32_t NotNullConstraintsResponse::write(::apache::thrift::protocol::TProtocol
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter414;
- for (_iter414 = this->notNullConstraints.begin(); _iter414 != this->notNullConstraints.end(); ++_iter414)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter419;
+ for (_iter419 = this->notNullConstraints.begin(); _iter419 != this->notNullConstraints.end(); ++_iter419)
{
- xfer += (*_iter414).write(oprot);
+ xfer += (*_iter419).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -11351,11 +11709,11 @@ void swap(NotNullConstraintsResponse &a, NotNullConstraintsResponse &b) {
swap(a.notNullConstraints, b.notNullConstraints);
}
-NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other415) {
- notNullConstraints = other415.notNullConstraints;
+NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other420) {
+ notNullConstraints = other420.notNullConstraints;
}
-NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other416) {
- notNullConstraints = other416.notNullConstraints;
+NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other421) {
+ notNullConstraints = other421.notNullConstraints;
return *this;
}
void NotNullConstraintsResponse::printTo(std::
<TRUNCATED>
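The hunks above extend the generated ColumnStatistics struct (and the related stats request/response structs) with optional txnId, validWriteIdList and isStatsCompliant fields. The following caller-side sketch is purely illustrative and not part of the commit: it exercises only the setters and printTo shown in this diff, while the header path and C++ namespace are assumptions.

    // Illustrative only -- not part of this commit. Requires the full generated
    // sources and the Thrift C++ runtime to actually build.
    #include <iostream>
    #include <vector>
    #include "hive_metastore_types.h"          // generated header (include path is an assumption)

    int main() {
      using namespace Apache::Hadoop::Hive;    // generated C++ namespace (assumption)

      ColumnStatistics cs;
      cs.__set_statsObj(std::vector<ColumnStatisticsObj>());  // existing field 2
      cs.__set_txnId(12345);                                  // new optional field 3 (T_I64)
      cs.__set_validWriteIdList("db.tbl:...");                // new optional field 4 (T_STRING), placeholder value
      // cs.__set_isStatsCompliant(...);                      // new optional field 5; enum constants not shown in this hunk

      cs.printTo(std::cout);   // txnId/validWriteIdList now appear; unset optionals print as <null>
      std::cout << std::endl;
      return 0;
    }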
[40/67] [abbrv] hive git commit: HIVE-19903: Disable temporary insert-only transactional table (Steve Yeom, reviewed by Jason Dere)
Posted by se...@apache.org.
HIVE-19903: Disable temporary insert-only transactional table (Steve Yeom, reviewed by Jason Dere)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/766c3dc2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/766c3dc2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/766c3dc2
Branch: refs/heads/master-txnstats
Commit: 766c3dc21e189afbecace308cd24cd1c5bde09b2
Parents: f83d765
Author: Jason Dere <jd...@hortonworks.com>
Authored: Sun Jun 17 21:38:05 2018 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Sun Jun 17 21:38:05 2018 -0700
----------------------------------------------------------------------
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 2 +-
.../test/queries/clientpositive/mm_iow_temp.q | 15 +++++
.../results/clientpositive/mm_iow_temp.q.out | 61 ++++++++++++++++++++
3 files changed, 77 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/766c3dc2/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 284fcac..c2bcedd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -12847,7 +12847,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
}
}
}
- boolean makeInsertOnly = HiveConf.getBoolVar(conf, ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY);
+ boolean makeInsertOnly = !isTemporaryTable && HiveConf.getBoolVar(conf, ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY);
boolean makeAcid = !isTemporaryTable &&
MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID) &&
HiveConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY) &&
http://git-wip-us.apache.org/repos/asf/hive/blob/766c3dc2/ql/src/test/queries/clientpositive/mm_iow_temp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mm_iow_temp.q b/ql/src/test/queries/clientpositive/mm_iow_temp.q
new file mode 100644
index 0000000..d6942e4
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/mm_iow_temp.q
@@ -0,0 +1,15 @@
+--! qt:dataset:src
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.create.as.insert.only=true;
+
+create temporary table temptable1 (
+ key string,
+ value string
+);
+
+insert overwrite table temptable1 select * from src;
+
+show create table temptable1;
+select * from temptable1 order by key limit 10;
http://git-wip-us.apache.org/repos/asf/hive/blob/766c3dc2/ql/src/test/results/clientpositive/mm_iow_temp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mm_iow_temp.q.out b/ql/src/test/results/clientpositive/mm_iow_temp.q.out
new file mode 100644
index 0000000..719a48a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/mm_iow_temp.q.out
@@ -0,0 +1,61 @@
+PREHOOK: query: create temporary table temptable1 (
+ key string,
+ value string
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@temptable1
+POSTHOOK: query: create temporary table temptable1 (
+ key string,
+ value string
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@temptable1
+PREHOOK: query: insert overwrite table temptable1 select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@temptable1
+POSTHOOK: query: insert overwrite table temptable1 select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@temptable1
+POSTHOOK: Lineage: temptable1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: temptable1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show create table temptable1
+PREHOOK: type: SHOW_CREATETABLE
+PREHOOK: Input: default@temptable1
+POSTHOOK: query: show create table temptable1
+POSTHOOK: type: SHOW_CREATETABLE
+POSTHOOK: Input: default@temptable1
+CREATE TEMPORARY TABLE `temptable1`(
+ `key` string,
+ `value` string)
+ROW FORMAT SERDE
+ 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+STORED AS INPUTFORMAT
+ 'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT
+ 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
+LOCATION
+#### A masked pattern was here ####
+TBLPROPERTIES (
+ 'bucketing_version'='2')
+PREHOOK: query: select * from temptable1 order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@temptable1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from temptable1 order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@temptable1
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
[39/67] [abbrv] hive git commit: HIVE-19880: Repl Load to return recoverable vs non-recoverable error codes (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
Posted by se...@apache.org.
HIVE-19880: Repl Load to return recoverable vs non-recoverable error codes (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f83d7654
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f83d7654
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f83d7654
Branch: refs/heads/master-txnstats
Commit: f83d7654ee8e6758c0026ed53a3a928914640e38
Parents: 24da460
Author: Sankar Hariappan <sa...@apache.org>
Authored: Sun Jun 17 17:28:02 2018 -0700
Committer: Sankar Hariappan <sa...@apache.org>
Committed: Sun Jun 17 17:28:02 2018 -0700
----------------------------------------------------------------------
.../hive/ql/parse/TestReplicationScenarios.java | 30 +++++++++++++++++++-
.../org/apache/hive/jdbc/TestJdbcDriver2.java | 22 ++++++++++++++
.../org/apache/hadoop/hive/ql/ErrorMsg.java | 11 +++++++
.../hadoop/hive/ql/exec/ReplCopyTask.java | 3 +-
.../hadoop/hive/ql/exec/repl/ReplDumpTask.java | 3 +-
.../ql/exec/repl/bootstrap/ReplLoadTask.java | 3 +-
.../filesystem/DatabaseEventsIterator.java | 4 +--
.../ql/parse/ReplicationSemanticAnalyzer.java | 22 +++++++-------
.../hadoop/hive/ql/parse/repl/CopyUtils.java | 16 ++++++-----
.../hive/ql/parse/repl/dump/TableExport.java | 2 +-
.../ql/parse/repl/dump/io/FileOperations.java | 5 ++--
.../hive/metastore/HiveMetaStoreClient.java | 5 +++-
.../hive/metastore/messaging/EventUtils.java | 4 +--
13 files changed, 99 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 862140f..689c859 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -73,6 +73,7 @@ import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import javax.annotation.Nullable;
@@ -853,7 +854,8 @@ public class TestReplicationScenarios {
InjectableBehaviourObjectStore.setGetNextNotificationBehaviour(eventIdSkipper);
advanceDumpDir();
- verifyFail("REPL DUMP " + dbName + " FROM " + replDumpId, driver);
+ CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName + " FROM " + replDumpId);
+ assertTrue(ret.getResponseCode() == ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getErrorCode());
eventIdSkipper.assertInjectionsPerformed(true,false);
InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour(); // reset the behaviour
}
@@ -3158,6 +3160,32 @@ public class TestReplicationScenarios {
}
@Test
+ public void testLoadCmPathMissing() throws IOException {
+ String dbName = createDB(testName.getMethodName(), driver);
+ run("CREATE TABLE " + dbName + ".normal(a int)", driver);
+ run("INSERT INTO " + dbName + ".normal values (1)", driver);
+
+ advanceDumpDir();
+ run("repl dump " + dbName, true, driver);
+ String dumpLocation = getResult(0, 0, driver);
+
+ run("DROP TABLE " + dbName + ".normal", driver);
+
+ String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR);
+ Path path = new Path(cmDir);
+ FileSystem fs = path.getFileSystem(hconf);
+ ContentSummary cs = fs.getContentSummary(path);
+ long fileCount = cs.getFileCount();
+ assertTrue(fileCount != 0);
+ fs.delete(path);
+
+ CommandProcessorResponse ret = driverMirror.run("REPL LOAD " + dbName + " FROM '" + dumpLocation + "'");
+ assertTrue(ret.getResponseCode() == ErrorMsg.REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH.getErrorCode());
+ run("drop database " + dbName, true, driver);
+ fs.create(path, false);
+ }
+
+ @Test
public void testDumpNonReplDatabase() throws IOException {
String dbName = createDBNonRepl(testName.getMethodName(), driver);
verifyFail("REPL DUMP " + dbName, driver);
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index d47c136..850b2d5 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -76,6 +76,7 @@ import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import static org.apache.hadoop.hive.conf.SystemVariables.SET_COLUMN_NAME;
import static org.apache.hadoop.hive.ql.exec.ExplainTask.EXPL_COLUMN_NAME;
@@ -2927,6 +2928,27 @@ public class TestJdbcDriver2 {
stmt.close();
}
+ @Test
+ public void testReplErrorScenarios() throws Exception {
+ HiveStatement stmt = (HiveStatement) con.createStatement();
+
+ try {
+ // source of replication not set
+ stmt.execute("repl dump default");
+ } catch(SQLException e){
+ assertTrue(e.getErrorCode() == ErrorMsg.REPL_DATABASE_IS_NOT_SOURCE_OF_REPLICATION.getErrorCode());
+ }
+
+ try {
+ // invalid load path
+ stmt.execute("repl load default1 from '/tmp/junk'");
+ } catch(SQLException e){
+ assertTrue(e.getErrorCode() == ErrorMsg.REPL_LOAD_PATH_NOT_FOUND.getErrorCode());
+ }
+
+ stmt.close();
+ }
+
/**
* Test {@link HiveStatement#executeAsync(String)} for an insert overwrite into a table
* @throws Exception
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index bc2cffa..90d6b8f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -499,6 +499,15 @@ public enum ErrorMsg {
" queue: {1}. Please fix and try again.", true),
SPARK_RUNTIME_OOM(20015, "Spark job failed because of out of memory."),
+ //if the error message is changed for REPL_EVENTS_MISSING_IN_METASTORE, then need modification in getNextNotification
+ //method in HiveMetaStoreClient
+ REPL_EVENTS_MISSING_IN_METASTORE(20016, "Notification events are missing in the meta store."),
+ REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID(20017, "Target database is bootstrapped from some other path."),
+ REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH(20018, "File is missing from both source and cm path."),
+ REPL_LOAD_PATH_NOT_FOUND(20019, "Load path does not exist."),
+ REPL_DATABASE_IS_NOT_SOURCE_OF_REPLICATION(20020,
+ "Source of replication (repl.source.for) is not set in the database properties."),
+
// An exception from runtime that will show the full stack to client
UNRESOLVED_RT_EXCEPTION(29999, "Runtime Error: {0}", "58004", true),
@@ -588,6 +597,8 @@ public enum ErrorMsg {
SPARK_GET_JOB_INFO_INTERRUPTED(30045, "Spark job was interrupted while getting job info"),
SPARK_GET_JOB_INFO_EXECUTIONERROR(30046, "Spark job failed in execution while getting job info due to exception {0}"),
+ REPL_FILE_SYSTEM_OPERATION_RETRY(30047, "Replication file system operation retry expired."),
+
//========================== 40000 range starts here ========================//
SPARK_JOB_RUNTIME_ERROR(40001, "Spark job failed due to: {0}", true),
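With these codes in place, a JDBC client can tell the failure classes apart numerically instead of parsing message text, which is what the new testReplErrorScenarios case asserts. A minimal client-side sketch, assuming a reachable HiveServer2 at the placeholder URL and hive-exec on the classpath for the ErrorMsg constants; the retry policy expressed here is illustrative, not something the commit prescribes.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;
    import org.apache.hadoop.hive.ql.ErrorMsg;

    public class ReplErrorCodeCheck {
      public static void main(String[] args) throws Exception {
        // placeholder connection details; point these at a real HiveServer2
        try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "", "");
             Statement stmt = con.createStatement()) {
          stmt.execute("repl load default1 from '/tmp/junk'");   // same invalid path the test uses
        } catch (SQLException e) {
          if (e.getErrorCode() == ErrorMsg.REPL_LOAD_PATH_NOT_FOUND.getErrorCode()) {
            System.out.println("non-recoverable here: fix the load path before retrying");
          } else if (e.getErrorCode() == ErrorMsg.REPL_FILE_SYSTEM_OPERATION_RETRY.getErrorCode()) {
            System.out.println("possibly transient: a plain retry may succeed");
          } else {
            throw e;
          }
        }
      }
    }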
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
index 8a89103..3a7f1bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.exec;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.parse.EximUtil;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.plan.CopyWork;
@@ -165,7 +166,7 @@ public class ReplCopyTask extends Task<ReplCopyWork> implements Serializable {
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
setException(e);
- return (1);
+ return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index ccdf04a..7e5f805 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTable
import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;
import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter;
import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
@@ -123,7 +124,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
} catch (Exception e) {
LOG.error("failed", e);
setException(e);
- return 1;
+ return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
}
return 0;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/ReplLoadTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/ReplLoadTask.java
index 76fb2a3..50fe3ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/ReplLoadTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/ReplLoadTask.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.exec.repl.bootstrap;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
@@ -223,7 +224,7 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
} catch (Exception e) {
LOG.error("failed replication", e);
setException(e);
- return 1;
+ return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
}
LOG.info("completed load task run : {}", work.executedLoadTask());
return 0;
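ReplCopyTask, ReplDumpTask and ReplLoadTask now share the same conversion: the exception message is looked up in ErrorMsg and the matching numeric code becomes the task return value instead of a blanket 1. A minimal sketch of that round trip, assuming hive-exec on the classpath; given the message added above, the printed code is expected to be 20019.

    import org.apache.hadoop.hive.ql.ErrorMsg;

    public class TaskReturnCodeSketch {
      public static void main(String[] args) {
        // simulate a repl load failing with one of the new, well-known messages
        Exception e = new RuntimeException(ErrorMsg.REPL_LOAD_PATH_NOT_FOUND.getMsg());
        int rc = ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
        System.out.println(rc);   // expected: 20019 (REPL_LOAD_PATH_NOT_FOUND)
      }
    }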
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java
index ecedf9b..f778cb4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java
@@ -91,8 +91,8 @@ class DatabaseEventsIterator implements Iterator<BootstrapEvent> {
return true;
} catch (Exception e) {
// may be do some retry logic here.
- throw new RuntimeException("could not traverse the file via remote iterator " + dbLevelPath,
- e);
+ LOG.error("could not traverse the file via remote iterator " + dbLevelPath, e);
+ throw new RuntimeException(e.getMessage(), e);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index 9753b5c..5aeae16 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -60,6 +60,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DBNAME;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_LIMIT;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_REPL_CONFIG;
@@ -109,7 +110,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
try {
initReplDump(ast);
} catch (HiveException e) {
- throw new SemanticException("repl dump failed " + e.getMessage());
+ throw new SemanticException(e.getMessage(), e);
}
analyzeReplDump(ast);
break;
@@ -146,11 +147,8 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
if (null != replConfigs) {
for (Map.Entry<String, String> config : replConfigs.entrySet()) {
conf.set(config.getKey(), config.getValue());
- if ("hive.repl.dump.metadata.only".equalsIgnoreCase(config.getKey()) &&
- "true".equalsIgnoreCase(config.getValue())) {
- isMetaDataOnly = true;
- }
}
+ isMetaDataOnly = HiveConf.getBoolVar(conf, REPL_DUMP_METADATA_ONLY);
}
} else if (ast.getChild(currNode).getType() == TOK_TABNAME) {
// optional tblName was specified.
@@ -184,12 +182,13 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
for (String dbName : Utils.matchesDb(db, dbNameOrPattern)) {
Database database = db.getDatabase(dbName);
if (database != null) {
- if (!ReplChangeManager.isSourceOfReplication(database) && !isMetaDataOnly) {
- throw new SemanticException("Cannot dump database " + dbName +
- " as it is not a source of replication");
+ if (!isMetaDataOnly && !ReplChangeManager.isSourceOfReplication(database)) {
+ LOG.error("Cannot dump database " + dbNameOrPattern +
+ " as it is not a source of replication (repl.source.for)");
+ throw new SemanticException(ErrorMsg.REPL_DATABASE_IS_NOT_SOURCE_OF_REPLICATION.getMsg());
}
} else {
- throw new SemanticException("Cannot dump database " + dbName + " as it does not exist");
+ throw new SemanticException("Cannot dump database " + dbNameOrPattern + " as it does not exist");
}
}
}
@@ -365,7 +364,8 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
if (!fs.exists(loadPath)) {
// supposed dump path does not exist.
- throw new FileNotFoundException(loadPath.toUri().toString());
+ LOG.error("File not found " + loadPath.toUri().toString());
+ throw new FileNotFoundException(ErrorMsg.REPL_LOAD_PATH_NOT_FOUND.getMsg());
}
// Now, the dumped path can be one of three things:
@@ -511,7 +511,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
} catch (Exception e) {
// TODO : simple wrap & rethrow for now, clean up with error codes
- throw new SemanticException(e);
+ throw new SemanticException(e.getMessage(), e);
}
}
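The metadata-only switch is now read back through the typed ConfVar rather than string-comparing every WITH-clause entry. A small sketch of that read, assuming hive-exec on the classpath and a default HiveConf; the conf.set call stands in for a repl config entry arriving from the WITH clause.

    import org.apache.hadoop.hive.conf.HiveConf;
    import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY;

    public class MetadataOnlySketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        conf.set("hive.repl.dump.metadata.only", "true");    // as initReplDump would after parsing the WITH clause
        boolean isMetaDataOnly = HiveConf.getBoolVar(conf, REPL_DUMP_METADATA_ONLY);
        System.out.println(isMetaDataOnly);                  // expected: true
      }
    }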
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
index 79b4652..61bf6b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.metadata.HiveFatalException;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -68,7 +70,7 @@ public class CopyUtils {
// changed/removed during copy, so double check the checksum after copy,
// if not match, copy again from cm
public void copyAndVerify(FileSystem destinationFs, Path destRoot,
- List<ReplChangeManager.FileInfo> srcFiles) throws IOException, LoginException {
+ List<ReplChangeManager.FileInfo> srcFiles) throws IOException, LoginException, HiveFatalException {
Map<FileSystem, Map< Path, List<ReplChangeManager.FileInfo>>> map = fsToFileMap(srcFiles, destRoot);
for (Map.Entry<FileSystem, Map<Path, List<ReplChangeManager.FileInfo>>> entry : map.entrySet()) {
FileSystem sourceFs = entry.getKey();
@@ -92,7 +94,7 @@ public class CopyUtils {
private void doCopyRetry(FileSystem sourceFs, List<ReplChangeManager.FileInfo> srcFileList,
FileSystem destinationFs, Path destination,
- boolean useRegularCopy) throws IOException, LoginException {
+ boolean useRegularCopy) throws IOException, LoginException, HiveFatalException {
int repeat = 0;
boolean isCopyError = false;
List<Path> pathList = Lists.transform(srcFileList, ReplChangeManager.FileInfo::getEffectivePath);
@@ -145,7 +147,7 @@ public class CopyUtils {
// If still files remains to be copied due to failure/checksum mismatch after several attempts, then throw error
if (!pathList.isEmpty()) {
LOG.error("File copy failed even after several attempts. Files list: " + pathList);
- throw new IOException("File copy failed even after several attempts.");
+ throw new IOException(ErrorMsg.REPL_FILE_SYSTEM_OPERATION_RETRY.getMsg());
}
}
@@ -154,7 +156,7 @@ public class CopyUtils {
// itself is missing, then throw error.
private List<Path> getFilesToRetry(FileSystem sourceFs, List<ReplChangeManager.FileInfo> srcFileList,
FileSystem destinationFs, Path destination, boolean isCopyError)
- throws IOException {
+ throws IOException, HiveFatalException {
List<Path> pathList = new ArrayList<Path>();
// Going through file list and make the retry list
@@ -190,9 +192,9 @@ public class CopyUtils {
srcPath = srcFile.getEffectivePath();
if (null == srcPath) {
// This case possible if CM path is not enabled.
- LOG.error("File copy failed and likely source file is deleted or modified. "
+ LOG.error("File copy failed and likely source file is deleted or modified."
+ "Source File: " + srcFile.getSourcePath());
- throw new IOException("File copy failed and likely source file is deleted or modified.");
+ throw new HiveFatalException(ErrorMsg.REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH.getMsg());
}
if (!srcFile.isUseSourcePath() && !sourceFs.exists(srcFile.getCmPath())) {
@@ -201,7 +203,7 @@ public class CopyUtils {
+ "Missing Source File: " + srcFile.getSourcePath() + ", CM File: " + srcFile.getCmPath() + ". "
+ "Try setting higher value for hive.repl.cm.retain in source warehouse. "
+ "Also, bootstrap the system again to get back the consistent replicated state.");
- throw new IOException("Both source and CM path are missing from source.");
+ throw new HiveFatalException(ErrorMsg.REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH.getMsg());
}
pathList.add(srcPath);
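CopyUtils now distinguishes the two failure classes explicitly: exhausted copy retries surface as an IOException carrying REPL_FILE_SYSTEM_OPERATION_RETRY, while a file missing from both the source and the CM path becomes a HiveFatalException, because no retry can bring it back. A minimal caller-side sketch of acting on that split; the retry policy is illustrative, not part of the commit.

    import java.io.IOException;
    import java.util.concurrent.Callable;
    import org.apache.hadoop.hive.ql.metadata.HiveFatalException;

    public class CopyRetrySketch {
      // Retry only the possibly transient IOException; a HiveFatalException is final.
      static <T> T runWithRetry(Callable<T> copyAttempt, int maxAttempts) throws Exception {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            return copyAttempt.call();
          } catch (HiveFatalException fatal) {
            throw fatal;            // non-recoverable: source and CM copy are both gone
          } catch (IOException ioe) {
            last = ioe;             // possibly transient: try again
          }
        }
        throw last != null ? last : new IOException("no copy attempt was made");
      }
    }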
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
index 20ff23a..b60be88 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
@@ -162,7 +162,7 @@ public class TableExport {
.export(replicationSpec);
}
} catch (Exception e) {
- throw new SemanticException(e);
+ throw new SemanticException(e.getMessage(), e);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
index c923121..58eae38 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
@@ -21,12 +21,10 @@ import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import javax.security.auth.login.LoginException;
-import org.apache.curator.shaded.com.google.common.collect.Lists;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -34,6 +32,7 @@ import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -161,7 +160,7 @@ public class FileOperations {
logger.info("writeFilesList failed", e);
if (repeat >= FileUtils.MAX_IO_ERROR_RETRY) {
logger.error("exporting data files in dir : " + dataPathList + " to " + exportRootDataDir + " failed");
- throw e;
+ throw new IOException(ErrorMsg.REPL_FILE_SYSTEM_OPERATION_RETRY.getMsg());
}
int sleepTime = FileUtils.getSleepTime(repeat - 1);
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 8990928..da41e6e 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -128,6 +128,9 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
private long retryDelaySeconds = 0;
private final ClientCapabilities version;
+ //copied from ErrorMsg.java
+ private static final String REPL_EVENTS_MISSING_IN_METASTORE = "Notification events are missing in the meta store.";
+
static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClient.class);
public HiveMetaStoreClient(Configuration conf) throws MetaException {
@@ -2717,7 +2720,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
+ "Try setting higher value for hive.metastore.event.db.listener.timetolive. "
+ "Also, bootstrap the system again to get back the consistent replicated state.",
nextEventId, e.getEventId());
- throw new IllegalStateException("Notification events are missing.");
+ throw new IllegalStateException(REPL_EVENTS_MISSING_IN_METASTORE);
}
if ((filter != null) && filter.accept(e)) {
filtered.addToEvents(e);
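The literal is duplicated because the standalone metastore does not depend on ql's ErrorMsg, yet the message text is exactly what higher layers map back to error code 20016, so the two strings have to stay identical (the comment next to the constant says as much). A small sketch of that coupling, assuming hive-exec on the classpath.

    import org.apache.hadoop.hive.ql.ErrorMsg;

    public class ReplEventsMissingSketch {
      public static void main(String[] args) {
        // the string copied into HiveMetaStoreClient above
        String copied = "Notification events are missing in the meta store.";
        System.out.println(copied.equals(ErrorMsg.REPL_EVENTS_MISSING_IN_METASTORE.getMsg()));  // expected: true
        System.out.println(ErrorMsg.getErrorMsg(copied).getErrorCode());                        // expected: 20016
      }
    }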
http://git-wip-us.apache.org/repos/asf/hive/blob/f83d7654/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
index 7d8c1d4..2b16897 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
@@ -93,7 +93,7 @@ public class EventUtils {
try {
return msc.getNextNotification(pos,getBatchSize(), filter).getEvents();
} catch (TException e) {
- throw new IOException(e);
+ throw new IOException(e.getMessage(), e);
}
}
}
@@ -179,7 +179,7 @@ public class EventUtils {
// but throwing the exception is the appropriate result here, and hasNext()
// signature will only allow RuntimeExceptions. Iterator.hasNext() really
// should have allowed IOExceptions
- throw new RuntimeException(e);
+ throw new RuntimeException(e.getMessage(), e);
}
// New batch has been fetched. If it's not empty, we have more elements to process.
return !batch.isEmpty();
[21/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
index 9e1c8d7..4c9b737 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_udf2.q.out
@@ -86,12 +86,12 @@ STAGE PLANS:
Statistics: Num rows: 39 Data size: 4032 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(14,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+ predicateExpression: FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 1000000)
predicate: (key = 10) (type: boolean)
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -118,8 +118,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -127,7 +127,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(14,5), value:int
+ dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [double, double, double, double, double, double, double]
@@ -195,12 +195,12 @@ STAGE PLANS:
Statistics: Num rows: 39 Data size: 4188 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(14,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+ predicateExpression: FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 1000000)
predicate: (key = 10) (type: boolean)
Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -227,8 +227,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -236,7 +236,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(14,5), value:int
+ dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [double, double, double, double, double, double, double, double]
@@ -310,12 +310,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(14,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+ predicateExpression: FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 1000000)
predicate: (key = 10) (type: boolean)
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -343,8 +343,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -352,7 +351,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(14,5), value:int
+ dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [double, double, double, double, double, double, double]
@@ -420,12 +419,12 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(14,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(14,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDecimalColEqualDecimalScalar(col 0:decimal(14,5), val 10)
+ predicateExpression: FilterDecimal64ColEqualDecimal64Scalar(col 0:decimal(14,5)/DECIMAL_64, val 1000000)
predicate: (key = 10) (type: boolean)
Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -453,8 +452,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -462,7 +460,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(14,5), value:int
+ dataColumns: key:decimal(14,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [double, double, double, double, double, double, double, double]
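The plan changes above show the practical effect of DECIMAL_64: a decimal(14,5) column is carried as an unscaled long, so the literal 10 in the filter shows up as 1000000 (10 shifted by the scale of 5) and the equality check runs on plain longs. A minimal sketch of that representation, separate from Hive's actual Decimal64 column vector classes.

    import java.math.BigDecimal;

    public class Decimal64Sketch {
      static long toDecimal64(BigDecimal value, int scale) {
        // setScale throws if the value cannot be represented exactly at the column's scale
        return value.setScale(scale).unscaledValue().longValueExact();
      }

      public static void main(String[] args) {
        int scale = 5;                                      // decimal(14,5)
        long keyAsLong = toDecimal64(new BigDecimal("10"), scale);
        System.out.println(keyAsLong);                      // 1000000, matching "val 1000000" in the plan
        System.out.println(keyAsLong == 1000000L);          // the filter becomes a long comparison
      }
    }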
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
index bf272d0..747b74a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_distinct_2.q.out
@@ -164,8 +164,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_elt.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_elt.q.out b/ql/src/test/results/clientpositive/llap/vector_elt.q.out
index 7303886..5745af2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_elt.q.out
@@ -63,8 +63,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -174,8 +174,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
index 342da4e..6912d7b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby4.q.out
@@ -77,8 +77,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
index d0b3395..d3c6548 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby6.q.out
@@ -77,8 +77,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
index ac6c589..a118b2e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_3.q.out
@@ -167,8 +167,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
index 2ea9018..5c0d6bb 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_cube1.q.out
@@ -84,8 +84,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -222,8 +221,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -386,8 +384,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -648,8 +645,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1038,8 +1034,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out
index 26e31d0..1ffa0fd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id1.q.out
@@ -94,8 +94,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -253,8 +253,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -412,8 +412,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -565,8 +565,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -718,8 +718,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -878,8 +878,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out
index dbc9c9a..dce2930 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id2.q.out
@@ -99,8 +99,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -301,8 +301,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -512,8 +512,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -819,8 +819,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1122,8 +1122,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1450,8 +1450,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1771,8 +1771,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1936,8 +1936,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2165,8 +2165,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
index 8d3c152..02f4683 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_id3.q.out
@@ -113,8 +113,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -281,8 +281,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out
index 315e7c7..1229c6d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets1.q.out
@@ -113,8 +113,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -275,8 +275,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -437,8 +437,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -599,8 +599,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -754,8 +754,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -910,8 +910,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1052,8 +1052,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out
index 38cafb0..f8220e1 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets2.q.out
@@ -99,8 +99,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -280,8 +280,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -684,8 +684,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out
index 7c77c4b..d00306b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets3_dec.q.out
@@ -66,7 +66,7 @@ STAGE PLANS:
Statistics: Num rows: 12 Data size: 5760 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c_dec:decimal(10,2), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c_dec:decimal(10,2)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: a (type: string), b (type: string), c_dec (type: decimal(10,2))
outputColumnNames: a, b, c_dec
@@ -78,7 +78,7 @@ STAGE PLANS:
Group By Operator
aggregations: sum(c_dec), count(c_dec), count()
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 2:decimal(10,2)) -> decimal(20,2), VectorUDAFCount(col 2:decimal(10,2)) -> bigint, VectorUDAFCountStar(*) -> bigint
+ aggregators: VectorUDAFSumDecimal64ToDecimal(col 2:decimal(10,2)/DECIMAL_64) -> decimal(20,2), VectorUDAFCount(col 2:decimal(10,2)/DECIMAL_64) -> bigint, VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
@@ -106,8 +106,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -115,7 +115,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0, 1, 2]
- dataColumns: a:string, b:string, c_dec:decimal(10,2)
+ dataColumns: a:string, b:string, c_dec:decimal(10,2)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
@@ -205,7 +205,7 @@ STAGE PLANS:
Statistics: Num rows: 12 Data size: 5760 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c_dec:decimal(10,2), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c_dec:decimal(10,2)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: a (type: string), b (type: string), c_dec (type: decimal(10,2))
outputColumnNames: a, b, c_dec
@@ -217,7 +217,7 @@ STAGE PLANS:
Group By Operator
aggregations: sum(c_dec), count(c_dec), count()
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 2:decimal(10,2)) -> decimal(20,2), VectorUDAFCount(col 2:decimal(10,2)) -> bigint, VectorUDAFCountStar(*) -> bigint
+ aggregators: VectorUDAFSumDecimal64ToDecimal(col 2:decimal(10,2)/DECIMAL_64) -> decimal(20,2), VectorUDAFCount(col 2:decimal(10,2)/DECIMAL_64) -> bigint, VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
@@ -245,8 +245,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -254,7 +254,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0, 1, 2]
- dataColumns: a:string, b:string, c_dec:decimal(10,2)
+ dataColumns: a:string, b:string, c_dec:decimal(10,2)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
@@ -370,7 +370,7 @@ STAGE PLANS:
Statistics: Num rows: 12 Data size: 5760 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c_dec:decimal(10,2), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:a:string, 1:b:string, 2:c_dec:decimal(10,2)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: a (type: string), b (type: string), c_dec (type: decimal(10,2))
outputColumnNames: a, b, c_dec
@@ -382,7 +382,7 @@ STAGE PLANS:
Group By Operator
aggregations: sum(c_dec), count(c_dec), count()
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 2:decimal(10,2)) -> decimal(20,2), VectorUDAFCount(col 2:decimal(10,2)) -> bigint, VectorUDAFCountStar(*) -> bigint
+ aggregators: VectorUDAFSumDecimal64ToDecimal(col 2:decimal(10,2)/DECIMAL_64) -> decimal(20,2), VectorUDAFCount(col 2:decimal(10,2)/DECIMAL_64) -> bigint, VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string
@@ -410,8 +410,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -419,7 +419,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0, 1, 2]
- dataColumns: a:string, b:string, c_dec:decimal(10,2)
+ dataColumns: a:string, b:string, c_dec:decimal(10,2)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
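The plan changes above show what the Decimal64 reader buys: columns such as c_dec:decimal(10,2) are now annotated decimal(10,2)/DECIMAL_64 and the sum aggregator becomes VectorUDAFSumDecimal64ToDecimal, which means short decimals (precision up to 18) travel through the vectorized path as scaled 64-bit longs rather than full HiveDecimal objects. A minimal standalone sketch of the scaled-long idea (illustrative only; the class below is not part of Hive):

    // Illustrative sketch: a decimal(10,2) value carried as a scaled long,
    // which is the representation behind the DECIMAL_64 annotations above.
    public final class ScaledLongDecimalDemo {
      public static void main(String[] args) {
        long a = 1234567L;              // represents 12345.67 at scale 2
        long b = 33L;                   // represents 0.33 at scale 2
        long sum = a + b;               // exact long addition, no object allocation
        System.out.printf("%d.%02d%n", sum / 100, sum % 100);  // prints 12346.00
      }
    }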
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out
index 285c154..a262f26 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets4.q.out
@@ -119,8 +119,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -363,8 +363,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -626,8 +626,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out
index 586d713..bbfba28 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets5.q.out
@@ -98,8 +98,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -290,8 +290,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -509,8 +509,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out
index 069594e..e26b6c5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets6.q.out
@@ -98,8 +98,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -241,8 +241,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out
index 0999c30..9501927 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_grouping.q.out
@@ -98,8 +98,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -259,8 +259,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -427,8 +427,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -596,8 +596,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -802,8 +802,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -963,8 +963,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1138,8 +1138,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1300,8 +1300,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1500,8 +1500,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1656,8 +1656,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1814,8 +1814,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1963,8 +1963,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2129,8 +2129,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2295,8 +2295,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2456,8 +2456,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
index c4b7fc3..bddde5f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_sets_limit.q.out
@@ -100,8 +100,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -301,8 +301,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -502,8 +502,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -700,8 +700,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -895,8 +895,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1080,8 +1080,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out
index 58e184d..5d81631 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_grouping_window.q.out
@@ -97,8 +97,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
index b99a4ac..e6628ab 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_mapjoin.q.out
@@ -61,8 +61,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -129,8 +128,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
index df9a46e..b2953fd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_reduce.q.out
@@ -293,8 +293,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -491,8 +491,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -784,8 +784,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1008,8 +1008,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
index 9d36c65..d1f8ac5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_rollup1.q.out
@@ -95,8 +95,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -351,8 +351,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -732,8 +732,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_sort_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_sort_11.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_sort_11.q.out
index b772e9a..ab29314 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_sort_11.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_sort_11.q.out
@@ -98,8 +98,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -521,8 +520,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -706,8 +704,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -887,8 +884,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_groupby_sort_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_groupby_sort_8.q.out b/ql/src/test/results/clientpositive/llap/vector_groupby_sort_8.q.out
index 74ad6ae..5e946c4 100644
--- a/ql/src/test/results/clientpositive/llap/vector_groupby_sort_8.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_groupby_sort_8.q.out
@@ -107,8 +107,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
index f355d4b..e0d533f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_grouping_sets.q.out
@@ -190,8 +190,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -321,8 +321,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out b/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
index d2edc1f..a88e385 100644
--- a/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_if_expr.q.out
@@ -57,8 +57,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_if_expr_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_if_expr_2.q.out b/ql/src/test/results/clientpositive/llap/vector_if_expr_2.q.out
index f4baa69..ddcd2a0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_if_expr_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_if_expr_2.q.out
@@ -72,8 +72,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out b/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
index 1e8a942..041990a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_include_no_sel.q.out
@@ -211,8 +211,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -238,8 +238,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out b/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
index bb555df..fb3d7cb 100644
--- a/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_inner_join.q.out
@@ -117,8 +117,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -168,8 +168,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -276,8 +276,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -339,8 +339,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -491,8 +491,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -543,8 +543,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -635,8 +635,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -704,8 +704,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -822,8 +822,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -874,8 +874,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -991,8 +991,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1043,8 +1043,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1160,8 +1160,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1212,8 +1212,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1304,8 +1304,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1381,8 +1381,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1473,8 +1473,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1550,8 +1550,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
[50/67] [abbrv] hive git commit: HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)
Posted by se...@apache.org.
HIVE-19853: Arrow serializer needs to create a TimeStampMicroTZVector instead of TimeStampMicroVector (Teddy Choi, reviewed by Matt McCline)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1a610cc5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1a610cc5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1a610cc5
Branch: refs/heads/master-txnstats
Commit: 1a610cc545d39b9e9116c5b90108197853d0364c
Parents: c4eb647
Author: Matt McCline <mm...@hortonworks.com>
Authored: Mon Jun 18 15:55:00 2018 -0500
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Mon Jun 18 15:55:00 2018 -0500
----------------------------------------------------------------------
.../hadoop/hive/ql/io/arrow/Deserializer.java | 94 +++++++-------------
.../hadoop/hive/ql/io/arrow/Serializer.java | 15 ++--
2 files changed, 40 insertions(+), 69 deletions(-)
----------------------------------------------------------------------
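The Deserializer hunk below collapses the separate TIMESTAMPMILLI / TIMESTAMPMICRO / TIMESTAMPNANO cases (with and without time zone) into one path that reads through TimeStampVector and then splits the epoch value into a second part and a non-negative sub-second part. A minimal standalone sketch of that split, using constant names that mirror the Hive source (values assumed to be the usual ones):

    // Sketch of the second / sub-second split used when filling a
    // TimestampColumnVector (time in milliseconds, nanos as the sub-second part).
    public final class TimestampSplitDemo {
      static final long MILLIS_PER_SECOND = 1_000L;
      static final long MICROS_PER_SECOND = 1_000_000L;
      static final long NS_PER_SECOND     = 1_000_000_000L;
      static final long NS_PER_MICROS     = 1_000L;

      /** Returns {timeInMillis, subSecondNanos} for an epoch value in microseconds. */
      static long[] splitMicros(long timeInMicros) {
        int subSecondInNanos = (int) ((timeInMicros % MICROS_PER_SECOND) * NS_PER_MICROS);
        long second = timeInMicros / MICROS_PER_SECOND;
        // A nanosecond value should not be negative (pre-epoch timestamps),
        // so borrow one second to make the sub-second part positive.
        if (subSecondInNanos < 0) {
          subSecondInNanos += NS_PER_SECOND;
          second -= 1;
        }
        return new long[] { second * MILLIS_PER_SECOND, subSecondInNanos };
      }

      public static void main(String[] args) {
        // -1.5 s before the epoch: -1_500_000 micros -> -2000 ms and 500,000,000 ns
        long[] parts = splitMicros(-1_500_000L);
        System.out.println(parts[0] + " ms, " + parts[1] + " ns");
      }
    }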
http://git-wip-us.apache.org/repos/asf/hive/blob/1a610cc5/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
index 6e09d39..edc4b39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Deserializer.java
@@ -29,9 +29,7 @@ import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.IntervalDayVector;
import org.apache.arrow.vector.IntervalYearVector;
import org.apache.arrow.vector.SmallIntVector;
-import org.apache.arrow.vector.TimeStampMicroVector;
-import org.apache.arrow.vector.TimeStampMilliVector;
-import org.apache.arrow.vector.TimeStampNanoVector;
+import org.apache.arrow.vector.TimeStampVector;
import org.apache.arrow.vector.TinyIntVector;
import org.apache.arrow.vector.VarBinaryVector;
import org.apache.arrow.vector.VarCharVector;
@@ -268,35 +266,11 @@ class Deserializer {
}
break;
case TIMESTAMPMILLI:
- {
- for (int i = 0; i < size; i++) {
- if (arrowVector.isNull(i)) {
- VectorizedBatchUtil.setNullColIsNullValue(hiveVector, i);
- } else {
- hiveVector.isNull[i] = false;
-
- // Time = second + sub-second
- final long timeInMillis = ((TimeStampMilliVector) arrowVector).get(i);
- final TimestampColumnVector timestampColumnVector = (TimestampColumnVector) hiveVector;
- int subSecondInNanos = (int) ((timeInMillis % MILLIS_PER_SECOND) * NS_PER_MILLIS);
- long second = timeInMillis / MILLIS_PER_SECOND;
-
- // A nanosecond value should not be negative
- if (subSecondInNanos < 0) {
-
- // So add one second to the negative nanosecond value to make it positive
- subSecondInNanos += NS_PER_SECOND;
-
- // Subtract one second from the second value because we added one second
- second -= 1;
- }
- timestampColumnVector.time[i] = second * MILLIS_PER_SECOND;
- timestampColumnVector.nanos[i] = subSecondInNanos;
- }
- }
- }
- break;
+ case TIMESTAMPMILLITZ:
case TIMESTAMPMICRO:
+ case TIMESTAMPMICROTZ:
+ case TIMESTAMPNANO:
+ case TIMESTAMPNANOTZ:
{
for (int i = 0; i < size; i++) {
if (arrowVector.isNull(i)) {
@@ -305,40 +279,36 @@ class Deserializer {
hiveVector.isNull[i] = false;
// Time = second + sub-second
- final long timeInMicros = ((TimeStampMicroVector) arrowVector).get(i);
- final TimestampColumnVector timestampColumnVector = (TimestampColumnVector) hiveVector;
- int subSecondInNanos = (int) ((timeInMicros % MICROS_PER_SECOND) * NS_PER_MICROS);
- long second = timeInMicros / MICROS_PER_SECOND;
-
- // A nanosecond value should not be negative
- if (subSecondInNanos < 0) {
-
- // So add one second to the negative nanosecond value to make it positive
- subSecondInNanos += NS_PER_SECOND;
-
- // Subtract one second from the second value because we added one second
- second -= 1;
+ final long time = ((TimeStampVector) arrowVector).get(i);
+ long second;
+ int subSecondInNanos;
+ switch (minorType) {
+ case TIMESTAMPMILLI:
+ case TIMESTAMPMILLITZ:
+ {
+ subSecondInNanos = (int) ((time % MILLIS_PER_SECOND) * NS_PER_MILLIS);
+ second = time / MILLIS_PER_SECOND;
+ }
+ break;
+ case TIMESTAMPMICROTZ:
+ case TIMESTAMPMICRO:
+ {
+ subSecondInNanos = (int) ((time % MICROS_PER_SECOND) * NS_PER_MICROS);
+ second = time / MICROS_PER_SECOND;
+ }
+ break;
+ case TIMESTAMPNANOTZ:
+ case TIMESTAMPNANO:
+ {
+ subSecondInNanos = (int) (time % NS_PER_SECOND);
+ second = time / NS_PER_SECOND;
+ }
+ break;
+ default:
+ throw new IllegalArgumentException();
}
- timestampColumnVector.time[i] = second * MILLIS_PER_SECOND;
- timestampColumnVector.nanos[i] = subSecondInNanos;
- }
- }
- }
- break;
- case TIMESTAMPNANO:
- {
- for (int i = 0; i < size; i++) {
- if (arrowVector.isNull(i)) {
- VectorizedBatchUtil.setNullColIsNullValue(hiveVector, i);
- } else {
- hiveVector.isNull[i] = false;
- // Time = second + sub-second
- final long timeInNanos = ((TimeStampNanoVector) arrowVector).get(i);
final TimestampColumnVector timestampColumnVector = (TimestampColumnVector) hiveVector;
- int subSecondInNanos = (int) (timeInNanos % NS_PER_SECOND);
- long second = timeInNanos / NS_PER_SECOND;
-
// A nanosecond value should not be negative
if (subSecondInNanos < 0) {
http://git-wip-us.apache.org/repos/asf/hive/blob/1a610cc5/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java
index e6af916..2961050 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/arrow/Serializer.java
@@ -30,7 +30,7 @@ import org.apache.arrow.vector.IntVector;
import org.apache.arrow.vector.IntervalDayVector;
import org.apache.arrow.vector.IntervalYearVector;
import org.apache.arrow.vector.SmallIntVector;
-import org.apache.arrow.vector.TimeStampMicroVector;
+import org.apache.arrow.vector.TimeStampMicroTZVector;
import org.apache.arrow.vector.TinyIntVector;
import org.apache.arrow.vector.VarBinaryVector;
import org.apache.arrow.vector.VarCharVector;
@@ -38,6 +38,7 @@ import org.apache.arrow.vector.VectorSchemaRoot;
import org.apache.arrow.vector.complex.ListVector;
import org.apache.arrow.vector.complex.MapVector;
import org.apache.arrow.vector.complex.NullableMapVector;
+import org.apache.arrow.vector.types.TimeUnit;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.FieldType;
@@ -177,8 +178,8 @@ class Serializer {
case DATE:
return Types.MinorType.DATEDAY.getType();
case TIMESTAMP:
- // HIVE-19723: Prefer microsecond because Spark supports it
- return Types.MinorType.TIMESTAMPMICRO.getType();
+ // HIVE-19853: Prefer timestamp in microsecond with time zone because Spark supports it
+ return new ArrowType.Timestamp(TimeUnit.MICROSECOND, "UTC");
case BINARY:
return Types.MinorType.VARBINARY.getType();
case DECIMAL:
@@ -433,11 +434,11 @@ class Serializer {
break;
case TIMESTAMP:
{
- final TimeStampMicroVector timeStampMicroVector = (TimeStampMicroVector) arrowVector;
+ final TimeStampMicroTZVector timeStampMicroTZVector = (TimeStampMicroTZVector) arrowVector;
final TimestampColumnVector timestampColumnVector = (TimestampColumnVector) hiveVector;
for (int i = 0; i < size; i++) {
if (hiveVector.isNull[i]) {
- timeStampMicroVector.setNull(i);
+ timeStampMicroTZVector.setNull(i);
} else {
// Time = second + sub-second
final long secondInMillis = timestampColumnVector.getTime(i);
@@ -446,9 +447,9 @@ class Serializer {
if ((secondInMillis > 0 && secondInMicros < 0) || (secondInMillis < 0 && secondInMicros > 0)) {
// If the timestamp cannot be represented in long microsecond, set it as a null value
- timeStampMicroVector.setNull(i);
+ timeStampMicroTZVector.setNull(i);
} else {
- timeStampMicroVector.set(i, secondInMicros + subSecondInMicros);
+ timeStampMicroTZVector.set(i, secondInMicros + subSecondInMicros);
}
}
}
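For reference, the type-mapping change in the Serializer above amounts to the following (class and method names taken from the diff; a sketch, not the full Hive code path):

    import org.apache.arrow.vector.types.TimeUnit;
    import org.apache.arrow.vector.types.Types;
    import org.apache.arrow.vector.types.pojo.ArrowType;

    public final class TimestampArrowTypeDemo {
      public static void main(String[] args) {
        // Before HIVE-19853: microsecond timestamps with no time zone,
        // for which Arrow allocates a TimeStampMicroVector.
        ArrowType before = Types.MinorType.TIMESTAMPMICRO.getType();

        // After HIVE-19853: microsecond timestamps tagged with UTC,
        // for which Arrow allocates a TimeStampMicroTZVector instead.
        ArrowType after = new ArrowType.Timestamp(TimeUnit.MICROSECOND, "UTC");

        System.out.println(before + " -> " + after);
      }
    }

The field type drives which vector class Arrow allocates, so tagging the timestamp type with a zone is what switches the serializer from TimeStampMicroVector to TimeStampMicroTZVector; per the commit message, the zone-tagged microsecond form is preferred because Spark supports it.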
[30/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mergejoin.q.out b/ql/src/test/results/clientpositive/llap/mergejoin.q.out
index 63bf690..1e4f632 100644
--- a/ql/src/test/results/clientpositive/llap/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/mergejoin.q.out
@@ -64,8 +64,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -148,8 +147,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1776,8 +1774,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1821,8 +1819,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1984,8 +1982,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2068,8 +2066,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2120,8 +2117,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2382,8 +2379,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2435,8 +2432,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2637,8 +2634,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2690,8 +2687,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2832,8 +2829,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2885,8 +2882,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3046,8 +3043,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3130,8 +3127,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3182,8 +3178,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3491,8 +3487,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3544,8 +3540,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3697,8 +3693,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3749,8 +3745,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_create.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_create.q.out b/ql/src/test/results/clientpositive/llap/orc_create.q.out
index 5aa43a8..423e51c 100644
--- a/ql/src/test/results/clientpositive/llap/orc_create.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_create.q.out
@@ -449,7 +449,7 @@ PREHOOK: query: CREATE TABLE orc_create_people_staging (
first_name string,
last_name string,
address string,
- salary decimal,
+ salary decimal(38,0),
start_date timestamp,
state string)
PREHOOK: type: CREATETABLE
@@ -460,7 +460,7 @@ POSTHOOK: query: CREATE TABLE orc_create_people_staging (
first_name string,
last_name string,
address string,
- salary decimal,
+ salary decimal(38,0),
start_date timestamp,
state string)
POSTHOOK: type: CREATETABLE
@@ -481,7 +481,7 @@ PREHOOK: query: CREATE TABLE orc_create_people (
first_name string,
last_name string,
address string,
- salary decimal,
+ salary decimal(38,0),
start_date timestamp)
PARTITIONED BY (state string)
STORED AS orc
@@ -493,7 +493,7 @@ POSTHOOK: query: CREATE TABLE orc_create_people (
first_name string,
last_name string,
address string,
- salary decimal,
+ salary decimal(38,0),
start_date timestamp)
PARTITIONED BY (state string)
STORED AS orc
@@ -515,13 +515,13 @@ POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).address SIMPLE [(orc_cr
POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
-POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
PREHOOK: query: SELECT COUNT(*) FROM orc_create_people where id < 10 and state = 'Ca'
PREHOOK: type: QUERY
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
index c4fe46e..65eec52 100644
--- a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out
@@ -80,11 +80,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd_staging_n0
-PREHOOK: query: insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n6 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n6 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@staging_n6
PREHOOK: Output: default@orc_ppd_staging_n0
-POSTHOOK: query: insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n6 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n6 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@staging_n6
POSTHOOK: Output: default@orc_ppd_staging_n0
@@ -177,11 +179,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd_n1
-PREHOOK: query: insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n0 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n0 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_staging_n0
PREHOOK: Output: default@orc_ppd_n1
-POSTHOOK: query: insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n0 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n0 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_ppd_staging_n0
POSTHOOK: Output: default@orc_ppd_n1
@@ -233,7 +237,7 @@ Table Parameters:
orc.bloom.filter.columns *
orc.row.index.stride 1000
rawDataSize 1139514
- totalSize 55453
+ totalSize 55665
#### A masked pattern was here ####
# Storage Information
@@ -251,7 +255,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n1
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 16676
+ HDFS_BYTES_READ: 16675
HDFS_BYTES_WRITTEN: 104
HDFS_READ_OPS: 7
HDFS_LARGE_READ_OPS: 0
@@ -943,7 +947,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n1
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 5691
+ HDFS_BYTES_READ: 5911
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -964,9 +968,9 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 1310720
- ALLOCATED_USED_BYTES: 13796
+ ALLOCATED_USED_BYTES: 13810
CACHE_HIT_BYTES: 24
- CACHE_MISS_BYTES: 5691
+ CACHE_MISS_BYTES: 5911
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
NUM_VECTOR_BATCHES: 1
@@ -1003,7 +1007,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 6
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1041,7 +1045,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 6
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1101,7 +1105,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 2100
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1139,7 +1143,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1174,7 +1178,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1212,7 +1216,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1250,7 +1254,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1276,7 +1280,7 @@ Stage-1 FILE SYSTEM COUNTERS:
Stage-1 HIVE COUNTERS:
CREATED_FILES: 1
DESERIALIZE_ERRORS: 0
- RECORDS_IN_Map_1: 2000
+ RECORDS_IN_Map_1: 2100
RECORDS_OUT_0: 1
RECORDS_OUT_INTERMEDIATE_Map_1: 81
RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
@@ -1286,15 +1290,15 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_MAP_0: 0
RECORDS_OUT_OPERATOR_RS_10: 81
RECORDS_OUT_OPERATOR_SEL_9: 81
- RECORDS_OUT_OPERATOR_TS_0: 2000
+ RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
- NUM_DECODED_BATCHES: 2
- NUM_VECTOR_BATCHES: 2
- ROWS_EMITTED: 2000
- SELECTED_ROWGROUPS: 2
+ NUM_DECODED_BATCHES: 3
+ NUM_VECTOR_BATCHES: 3
+ ROWS_EMITTED: 2100
+ SELECTED_ROWGROUPS: 3
Stage-1 INPUT COUNTERS:
GROUPED_INPUT_SPLITS_Map_1: 1
INPUT_DIRECTORIES_Map_1: 1
@@ -1314,7 +1318,7 @@ Stage-1 FILE SYSTEM COUNTERS:
Stage-1 HIVE COUNTERS:
CREATED_FILES: 1
DESERIALIZE_ERRORS: 0
- RECORDS_IN_Map_1: 2000
+ RECORDS_IN_Map_1: 2100
RECORDS_OUT_0: 1
RECORDS_OUT_INTERMEDIATE_Map_1: 74
RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
@@ -1324,15 +1328,15 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_MAP_0: 0
RECORDS_OUT_OPERATOR_RS_10: 74
RECORDS_OUT_OPERATOR_SEL_9: 74
- RECORDS_OUT_OPERATOR_TS_0: 2000
+ RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
- NUM_DECODED_BATCHES: 2
- NUM_VECTOR_BATCHES: 2
- ROWS_EMITTED: 2000
- SELECTED_ROWGROUPS: 2
+ NUM_DECODED_BATCHES: 3
+ NUM_VECTOR_BATCHES: 3
+ ROWS_EMITTED: 2100
+ SELECTED_ROWGROUPS: 3
Stage-1 INPUT COUNTERS:
GROUPED_INPUT_SPLITS_Map_1: 1
INPUT_DIRECTORIES_Map_1: 1
@@ -1364,7 +1368,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 12
RECORDS_OUT_OPERATOR_TS_0: 2000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1402,7 +1406,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 13
RECORDS_OUT_OPERATOR_TS_0: 2000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1440,7 +1444,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 1
RECORDS_OUT_OPERATOR_TS_0: 100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1478,7 +1482,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 7
RECORDS_OUT_OPERATOR_TS_0: 1100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1516,7 +1520,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1551,7 +1555,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1586,7 +1590,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1621,7 +1625,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 2
RECORDS_OUT_OPERATOR_TS_0: 100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1659,7 +1663,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 6
RECORDS_OUT_OPERATOR_TS_0: 1100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1697,7 +1701,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 6770
+ CACHE_HIT_BYTES: 6990
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1735,7 +1739,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_9: 2
RECORDS_OUT_OPERATOR_TS_0: 100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 6770
+ CACHE_HIT_BYTES: 6990
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
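The rewritten INSERT statements in the hunks above extend the ORDER BY from `t, s` to every projected column and give the cast expressions aliases (`c`, `v`, `da`) so they can participate in the sort key, which removes the tie-breaking ambiguity in the stored row order. A stripped-down sketch of the same pattern, with hypothetical table names:

    -- Hypothetical sketch: alias derived columns so the full projection can be ordered on.
    INSERT OVERWRITE TABLE orc_demo_target
    SELECT t, si, i, s,
           cast(s AS char(50))    AS c,
           cast(s AS varchar(50)) AS v
    FROM orc_demo_staging
    ORDER BY t, si, i, s, c, v;   -- every output column participates in the sort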
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
index 6dec42f..93e2667 100644
--- a/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out
@@ -80,11 +80,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd_staging
-PREHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@staging
PREHOOK: Output: default@orc_ppd_staging
-POSTHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@staging
POSTHOOK: Output: default@orc_ppd_staging
@@ -177,11 +179,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd
-PREHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s
+PREHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_staging
PREHOOK: Output: default@orc_ppd
-POSTHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_ppd_staging
POSTHOOK: Output: default@orc_ppd
@@ -233,7 +237,7 @@ Table Parameters:
orc.bloom.filter.columns *
orc.row.index.stride 1000
rawDataSize 1139514
- totalSize 55453
+ totalSize 55665
#### A masked pattern was here ####
# Storage Information
@@ -251,7 +255,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 17731
+ HDFS_BYTES_READ: 17730
HDFS_BYTES_WRITTEN: 104
HDFS_READ_OPS: 8
HDFS_LARGE_READ_OPS: 0
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_merge11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge11.q.out b/ql/src/test/results/clientpositive/llap/orc_merge11.q.out
index 1b2ddd3..8e7840c 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge11.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge11.q.out
@@ -6,11 +6,11 @@ PREHOOK: query: DROP TABLE orc_split_elim_n0
PREHOOK: type: DROPTABLE
POSTHOOK: query: DROP TABLE orc_split_elim_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_split_elim_n0
-POSTHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_split_elim_n0
@@ -30,36 +30,36 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@orc_split_elim_n0
-PREHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
+PREHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
+POSTHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orcfile_merge1_n2
-PREHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0
+PREHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_split_elim_n0
PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0
+POSTHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_split_elim_n0
POSTHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.string1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.subtype SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.ts SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.userid SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0
+PREHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_split_elim_n0
PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0
+POSTHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_split_elim_n0
POSTHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.string1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.subtype SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.ts SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -76,42 +76,42 @@ File Version: 0.12 with ORC_135
Rows: 50000
Compression: ZLIB
Compression size: 4096
-Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(38,0),ts:timestamp>
Stripe Statistics:
Stripe 1:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
File Statistics:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripes:
- Stripe: offset: 3 data: 5897 rows: 50000 tail: 113 index: 497
+ Stripe: offset: 3 data: 5761 rows: 50000 tail: 112 index: 433
Stream: column 0 section ROW_INDEX start: 3 length 17
- Stream: column 1 section ROW_INDEX start: 20 length 83
- Stream: column 2 section ROW_INDEX start: 103 length 81
- Stream: column 3 section ROW_INDEX start: 184 length 111
- Stream: column 4 section ROW_INDEX start: 295 length 110
- Stream: column 5 section ROW_INDEX start: 405 length 95
- Stream: column 1 section DATA start: 500 length 45
- Stream: column 2 section DATA start: 545 length 41
- Stream: column 2 section LENGTH start: 586 length 8
- Stream: column 2 section DICTIONARY_DATA start: 594 length 23
- Stream: column 3 section DATA start: 617 length 5167
- Stream: column 4 section DATA start: 5784 length 524
- Stream: column 4 section SECONDARY start: 6308 length 18
- Stream: column 5 section DATA start: 6326 length 53
- Stream: column 5 section SECONDARY start: 6379 length 18
+ Stream: column 1 section ROW_INDEX start: 20 length 73
+ Stream: column 2 section ROW_INDEX start: 93 length 79
+ Stream: column 3 section ROW_INDEX start: 172 length 85
+ Stream: column 4 section ROW_INDEX start: 257 length 92
+ Stream: column 5 section ROW_INDEX start: 349 length 87
+ Stream: column 1 section DATA start: 436 length 30
+ Stream: column 2 section DATA start: 466 length 24
+ Stream: column 2 section LENGTH start: 490 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 498 length 23
+ Stream: column 3 section DATA start: 521 length 5114
+ Stream: column 4 section DATA start: 5635 length 480
+ Stream: column 4 section SECONDARY start: 6115 length 18
+ Stream: column 5 section DATA start: 6133 length 46
+ Stream: column 5 section SECONDARY start: 6179 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -125,37 +125,37 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
-File length: 6890 bytes
+File length: 6685 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
@@ -167,42 +167,42 @@ File Version: 0.12 with ORC_135
Rows: 50000
Compression: ZLIB
Compression size: 4096
-Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(38,0),ts:timestamp>
Stripe Statistics:
Stripe 1:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
File Statistics:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripes:
- Stripe: offset: 3 data: 5897 rows: 50000 tail: 113 index: 497
+ Stripe: offset: 3 data: 5761 rows: 50000 tail: 112 index: 433
Stream: column 0 section ROW_INDEX start: 3 length 17
- Stream: column 1 section ROW_INDEX start: 20 length 83
- Stream: column 2 section ROW_INDEX start: 103 length 81
- Stream: column 3 section ROW_INDEX start: 184 length 111
- Stream: column 4 section ROW_INDEX start: 295 length 110
- Stream: column 5 section ROW_INDEX start: 405 length 95
- Stream: column 1 section DATA start: 500 length 45
- Stream: column 2 section DATA start: 545 length 41
- Stream: column 2 section LENGTH start: 586 length 8
- Stream: column 2 section DICTIONARY_DATA start: 594 length 23
- Stream: column 3 section DATA start: 617 length 5167
- Stream: column 4 section DATA start: 5784 length 524
- Stream: column 4 section SECONDARY start: 6308 length 18
- Stream: column 5 section DATA start: 6326 length 53
- Stream: column 5 section SECONDARY start: 6379 length 18
+ Stream: column 1 section ROW_INDEX start: 20 length 73
+ Stream: column 2 section ROW_INDEX start: 93 length 79
+ Stream: column 3 section ROW_INDEX start: 172 length 85
+ Stream: column 4 section ROW_INDEX start: 257 length 92
+ Stream: column 5 section ROW_INDEX start: 349 length 87
+ Stream: column 1 section DATA start: 436 length 30
+ Stream: column 2 section DATA start: 466 length 24
+ Stream: column 2 section LENGTH start: 490 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 498 length 23
+ Stream: column 3 section DATA start: 521 length 5114
+ Stream: column 4 section DATA start: 5635 length 480
+ Stream: column 4 section SECONDARY start: 6115 length 18
+ Stream: column 5 section DATA start: 6133 length 46
+ Stream: column 5 section SECONDARY start: 6179 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -216,37 +216,37 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
-File length: 6890 bytes
+File length: 6685 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
@@ -279,49 +279,49 @@ File Version: 0.12 with ORC_135
Rows: 100000
Compression: ZLIB
Compression size: 4096
-Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(38,0),ts:timestamp>
Stripe Statistics:
Stripe 1:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripe 2:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
File Statistics:
Column 0: count: 100000 hasNull: false
- Column 1: count: 100000 hasNull: false bytesOnDisk: 90 min: 2 max: 100 sum: 9998476
- Column 2: count: 100000 hasNull: false bytesOnDisk: 144 min: bar max: zebra sum: 499960
- Column 3: count: 100000 hasNull: false bytesOnDisk: 10334 min: 0.8 max: 80.0 sum: 800205.6000000001
- Column 4: count: 100000 hasNull: false bytesOnDisk: 1084 min: 0 max: 6 sum: 64
- Column 5: count: 100000 hasNull: false bytesOnDisk: 142 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 100000 hasNull: false bytesOnDisk: 60 min: 2 max: 100 sum: 9998476
+ Column 2: count: 100000 hasNull: false bytesOnDisk: 110 min: bar max: zebra sum: 499960
+ Column 3: count: 100000 hasNull: false bytesOnDisk: 10228 min: 0.8 max: 80.0 sum: 800205.6
+ Column 4: count: 100000 hasNull: false bytesOnDisk: 996 min: 0 max: 6 sum: 64
+ Column 5: count: 100000 hasNull: false bytesOnDisk: 128 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripes:
- Stripe: offset: 3 data: 5897 rows: 50000 tail: 113 index: 497
+ Stripe: offset: 3 data: 5761 rows: 50000 tail: 112 index: 433
Stream: column 0 section ROW_INDEX start: 3 length 17
- Stream: column 1 section ROW_INDEX start: 20 length 83
- Stream: column 2 section ROW_INDEX start: 103 length 81
- Stream: column 3 section ROW_INDEX start: 184 length 111
- Stream: column 4 section ROW_INDEX start: 295 length 110
- Stream: column 5 section ROW_INDEX start: 405 length 95
- Stream: column 1 section DATA start: 500 length 45
- Stream: column 2 section DATA start: 545 length 41
- Stream: column 2 section LENGTH start: 586 length 8
- Stream: column 2 section DICTIONARY_DATA start: 594 length 23
- Stream: column 3 section DATA start: 617 length 5167
- Stream: column 4 section DATA start: 5784 length 524
- Stream: column 4 section SECONDARY start: 6308 length 18
- Stream: column 5 section DATA start: 6326 length 53
- Stream: column 5 section SECONDARY start: 6379 length 18
+ Stream: column 1 section ROW_INDEX start: 20 length 73
+ Stream: column 2 section ROW_INDEX start: 93 length 79
+ Stream: column 3 section ROW_INDEX start: 172 length 85
+ Stream: column 4 section ROW_INDEX start: 257 length 92
+ Stream: column 5 section ROW_INDEX start: 349 length 87
+ Stream: column 1 section DATA start: 436 length 30
+ Stream: column 2 section DATA start: 466 length 24
+ Stream: column 2 section LENGTH start: 490 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 498 length 23
+ Stream: column 3 section DATA start: 521 length 5114
+ Stream: column 4 section DATA start: 5635 length 480
+ Stream: column 4 section SECONDARY start: 6115 length 18
+ Stream: column 5 section DATA start: 6133 length 46
+ Stream: column 5 section SECONDARY start: 6179 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -335,51 +335,51 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
- Stripe: offset: 6510 data: 5897 rows: 50000 tail: 113 index: 497
- Stream: column 0 section ROW_INDEX start: 6510 length 17
- Stream: column 1 section ROW_INDEX start: 6527 length 83
- Stream: column 2 section ROW_INDEX start: 6610 length 81
- Stream: column 3 section ROW_INDEX start: 6691 length 111
- Stream: column 4 section ROW_INDEX start: 6802 length 110
- Stream: column 5 section ROW_INDEX start: 6912 length 95
- Stream: column 1 section DATA start: 7007 length 45
- Stream: column 2 section DATA start: 7052 length 41
- Stream: column 2 section LENGTH start: 7093 length 8
- Stream: column 2 section DICTIONARY_DATA start: 7101 length 23
- Stream: column 3 section DATA start: 7124 length 5167
- Stream: column 4 section DATA start: 12291 length 524
- Stream: column 4 section SECONDARY start: 12815 length 18
- Stream: column 5 section DATA start: 12833 length 53
- Stream: column 5 section SECONDARY start: 12886 length 18
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
+ Stripe: offset: 6309 data: 5761 rows: 50000 tail: 112 index: 433
+ Stream: column 0 section ROW_INDEX start: 6309 length 17
+ Stream: column 1 section ROW_INDEX start: 6326 length 73
+ Stream: column 2 section ROW_INDEX start: 6399 length 79
+ Stream: column 3 section ROW_INDEX start: 6478 length 85
+ Stream: column 4 section ROW_INDEX start: 6563 length 92
+ Stream: column 5 section ROW_INDEX start: 6655 length 87
+ Stream: column 1 section DATA start: 6742 length 30
+ Stream: column 2 section DATA start: 6772 length 24
+ Stream: column 2 section LENGTH start: 6796 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 6804 length 23
+ Stream: column 3 section DATA start: 6827 length 5114
+ Stream: column 4 section DATA start: 11941 length 480
+ Stream: column 4 section SECONDARY start: 12421 length 18
+ Stream: column 5 section DATA start: 12439 length 46
+ Stream: column 5 section SECONDARY start: 12485 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -393,37 +393,37 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
-File length: 13411 bytes
+File length: 13004 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge5.q.out b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
index 57482f5..d49c72a 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n5
-POSTHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n5
-PREHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5b_n0
@@ -50,7 +50,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -62,7 +62,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5b_n0
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp)
outputColumnNames: userid, string1, subtype, decimal1, ts
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -73,7 +73,7 @@ STAGE PLANS:
Reduce Output Operator
sort order:
Statistics: Num rows: 1 Data size: 2696 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -110,7 +110,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5b_n0
PREHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
@@ -121,7 +121,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -180,7 +180,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -192,7 +192,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5b_n0
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp)
outputColumnNames: userid, string1, subtype, decimal1, ts
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -203,7 +203,7 @@ STAGE PLANS:
Reduce Output Operator
sort order:
Statistics: Num rows: 1 Data size: 2696 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -249,7 +249,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5b_n0
Stage: Stage-4
@@ -288,7 +288,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -322,7 +322,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
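A note on the decimal(10,0) -> decimal(38,0) changes in this file: Hive treats an unparameterized DECIMAL as DECIMAL(10,0), so the old golden output printed decimal(10,0) for a column declared simply as "decimal"; the updated .q file spells out decimal(38,0), and the plan, column-stats and lineage lines change accordingly. A minimal sketch of the default behaviour (table name hypothetical, not part of the patch):
  -- Illustrative only: an unparameterized DECIMAL column defaults to DECIMAL(10,0) in Hive.
  create table decimal_default_demo (d_default decimal, d_wide decimal(38,0)) stored as orc;
  describe decimal_default_demo;
  -- d_default   decimal(10,0)
  -- d_wide      decimal(38,0)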
[27/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
index 7259b33..36b53e5 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_llap_io.q.out
@@ -88,8 +88,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -200,8 +199,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -314,8 +312,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -439,8 +436,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -590,8 +586,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -736,8 +731,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -875,8 +869,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1028,8 +1021,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1158,8 +1150,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
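The hunks above drop the "vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]" line and report featureSupportInUse: [DECIMAL_64] instead, i.e. DECIMAL_64 is no longer disabled just because LLAP IO is on. These "Map Vectorization" blocks are the EXPLAIN VECTORIZATION output captured by the .q tests; a minimal sketch of the kind of statement that prints them (table name hypothetical, not part of the patch):
  -- Illustrative only; any vectorizable table would do.
  explain vectorization detail
  select count(*) from llap_text_demo where c1 > 0;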
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out
index 8f83622..867e134 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_table_llap_io.q.out
@@ -88,8 +88,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -194,8 +193,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -293,8 +291,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -400,8 +397,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -501,8 +497,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -608,8 +603,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -716,8 +710,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -879,8 +872,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1005,8 +997,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1134,8 +1125,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1255,8 +1245,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1377,8 +1366,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1556,8 +1544,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1726,8 +1713,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
index f411b01..fba880b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
@@ -490,8 +490,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -579,8 +579,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1023,8 +1023,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1150,8 +1150,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
index 92366c8..ffe3bfb 100644
--- a/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
@@ -166,8 +166,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -306,8 +306,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -446,8 +446,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out b/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
index c99ac8d..5c35139 100644
--- a/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
@@ -105,8 +105,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_annotate_stats_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_annotate_stats_select.q.out b/ql/src/test/results/clientpositive/llap/vector_annotate_stats_select.q.out
index 82ac85c..cb463a1 100644
--- a/ql/src/test/results/clientpositive/llap/vector_annotate_stats_select.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_annotate_stats_select.q.out
@@ -413,8 +413,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -474,8 +474,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -535,8 +535,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -596,8 +596,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -657,8 +657,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -718,8 +718,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -779,8 +779,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -888,8 +888,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -949,8 +949,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1010,8 +1010,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1071,8 +1071,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1132,8 +1132,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1193,8 +1193,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1254,8 +1254,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1315,8 +1315,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1412,8 +1412,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1478,8 +1478,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1544,8 +1544,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1612,8 +1612,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1719,8 +1719,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1821,8 +1821,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1928,8 +1928,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2029,8 +2029,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2096,8 +2096,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
index 559e28b..6238281 100644
--- a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
@@ -1267,8 +1267,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1310,8 +1310,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out b/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
index c85c59e..1824976 100644
--- a/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_between_columns.q.out
@@ -134,8 +134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -168,8 +168,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -303,8 +303,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -337,8 +337,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
index b1c0bab..7355ed8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_between_in.q.out
@@ -79,8 +79,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -184,8 +184,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -283,8 +283,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -388,8 +388,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -487,8 +487,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -582,8 +582,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -677,8 +677,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -782,8 +782,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1137,8 +1137,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1275,8 +1275,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1413,8 +1413,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1551,8 +1551,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
index 7d14542..e47c118 100644
--- a/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_binary_join_groupby.q.out
@@ -152,6 +152,7 @@ STAGE PLANS:
0 _col10 (type: binary)
1 _col10 (type: binary)
Map Join Vectorization:
+ bigTableValueExpressions: ConvertDecimal64ToDecimal(col 9:decimal(4,2)/DECIMAL_64) -> 12:decimal(4,2)
className: VectorMapJoinInnerStringOperator
native: true
nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
@@ -165,13 +166,13 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [22]
- selectExpressions: VectorUDFAdaptor(hash(_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11,_col12,_col13,_col14,_col15,_col16,_col17,_col18,_col19,_col20,_col21)) -> 22:int
+ projectedOutputColumnNums: [23]
+ selectExpressions: VectorUDFAdaptor(hash(_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11,_col12,_col13,_col14,_col15,_col16,_col17,_col18,_col19,_col20,_col21)) -> 23:int
Statistics: Num rows: 10000 Data size: 6819968 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: sum(_col0)
Group By Vectorization:
- aggregators: VectorUDAFSumLong(col 22:int) -> bigint
+ aggregators: VectorUDAFSumLong(col 23:int) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
native: false
@@ -193,8 +194,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -236,8 +237,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -391,8 +392,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -594,8 +595,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -637,8 +638,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
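The vector_binary_join_groupby diff above is one of the few places where the plan changes beyond the feature-support lines: a ConvertDecimal64ToDecimal(col 9:decimal(4,2)/DECIMAL_64) -> 12:decimal(4,2) expression appears among the map-join big-table value expressions, and the scratch column used by the hash() projection shifts from 22 to 23, consistent with the extra scratch column the conversion needs. DECIMAL_64 keeps low-precision decimals (up to 18 digits) as scaled 64-bit integers and converts back to the regular decimal representation only where an operator needs it, which is what this expression does. A minimal sketch of a column that qualifies for the long-backed path versus one that does not (table name hypothetical, not part of the patch):
  -- Illustrative only: decimal(4,2) fits in a 64-bit scaled integer and can use the
  -- DECIMAL_64 reader; decimal(38,0) exceeds the 18-digit limit and keeps the
  -- HiveDecimal-backed path.
  create table decimal64_demo (small_dec decimal(4,2), wide_dec decimal(38,0)) stored as orc;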
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_bround.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_bround.q.out b/ql/src/test/results/clientpositive/llap/vector_bround.q.out
index 59996d6..02770a2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_bround.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_bround.q.out
@@ -95,8 +95,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
[03/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index 74d774b..07a2c33 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -244,7 +244,7 @@ left outer join small_alltypesorc_a_n1 hd
on hd.cstring1 = c.cstring1
) t1
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cint"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","columnExprMap:":{"_col0":"cint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":
"_col0 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorizat
ion:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 6]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"Map Join V
ectorization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col1"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hash
table IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"
className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 6]","dataColumns:":["ctinyint:tinyint","csmallint:smallint",
"cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io
.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cint"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","columnExprMap:":{"_col0":"cint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":
"_col0 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorizat
ion:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[2, 6]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col1":"0:_col1"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int)","1":"_col0 (type: int)"},"Map Join V
ectorization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col1"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hash
table IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"
className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 6]","dataColumns:":["ctinyint:tinyint","
csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apac
he.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
PREHOOK: query: select count(*) from (select c.cstring1
from small_alltypesorc_a_n1 c
left outer join small_alltypesorc_a_n1 cd
@@ -284,7 +284,7 @@ left outer join small_alltypesorc_a_n1 hd
on hd.cstring1 = c.cstring1
) t1
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cstring2 (type: string)","columnExprMap:":{"_col0":"cstring2"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator"
:{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_
n1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cstring1","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string
)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 7:string"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","native
ConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"so
rt order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[6, 7]","dataColumn
s:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","
output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cstring2 (type: string)","columnExprMap:":{"_col0":"cstring2"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator"
:{"keys:":{"0":"_col1 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cstring1 (type: string)","columnExprMap:":{"_col0":"cstring1"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_
n1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cstring1","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: string
)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 7:string"],"bigTableValueExpressions:":["col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: string)","1":"_col0 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:string"],"className:":"VectorMapJoinOperator","native:":"false","native
ConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"so
rt order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":
"[6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.Sequen
ceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
PREHOOK: query: select count(*) from (select c.cstring1
from small_alltypesorc_a_n1 c
left outer join small_alltypesorc_a_n1 cd
@@ -324,7 +324,7 @@ left outer join small_alltypesorc_a_n1 hd
on hd.cstring1 = c.cstring1 and hd.cint = c.cint
) t1
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cbigint","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cbigint (type: bigint), cstring2 (type: string)","columnExprMap:":{"_col0":"cbigint","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE"
,"OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableSca
n":{"alias:":"c","columns:":["cint","cbigint","cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cbigint","_col2":"cstring1","_col3":"cstring2"},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","p
rojectedOutputColumnNums:":"[2, 3, 6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col2":"0:_col2"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 3:bigint","col 7:string"],"bigTableValueExpressions:":["col 2:int","col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"
,"_col2"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:int","col 1:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Grou
p By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}
}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 3, 6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Redu
ce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cbigint","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cbigint (type: bigint), cstring2 (type: string)","columnExprMap:":{"_col0":"cbigint","_col1":"cstring2"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE"
,"OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["cint","cstring1"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"cint (type: int), cstring1 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cstring1"},"outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableSca
n":{"alias:":"c","columns:":["cint","cbigint","cstring1","cstring2"],"database:":"default","Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_a_n1","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"cint (type: int), cbigint (type: bigint), cstring1 (type: string), cstring2 (type: string)","columnExprMap:":{"_col0":"cint","_col1":"cbigint","_col2":"cstring1","_col3":"cstring2"},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","p
rojectedOutputColumnNums:":"[2, 3, 6, 7]"},"Statistics:":"Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col2":"0:_col2"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: bigint), _col3 (type: string)","1":"_col0 (type: bigint), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 3:bigint","col 7:string"],"bigTableValueExpressions:":["col 2:int","col 6:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"
,"_col2"],"Statistics:":"Num rows: 22 Data size: 4840 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: int), _col2 (type: string)","1":"_col0 (type: int), _col1 (type: string)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:int","col 1:string"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 24 Data size: 5324 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Grou
p By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"className:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}
}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[2, 3, 6, 7]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spar
k] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
PREHOOK: query: select count(*) from (select c.cstring1
from small_alltypesorc_a_n1 c
left outer join small_alltypesorc_a_n1 cd
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
index f26cfee..a96507d 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
@@ -258,7 +258,7 @@ from small_alltypesorc_b c
left outer join small_alltypesorc_b cd
on cd.cint = c.cint
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-4":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-4"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-4":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), c
string2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)","columnExprMap:":{"_col0":"ctinyint","_col1":"csmallint","_col10":"cboolean1","_col11":"cboolean2","_col2":"cint","_col3":"cbigint","_col4":"cfloat","_col5":"cdouble","_col6":"cstring1","_col7":"cstring2","_col8":"ctimestamp1","_col9":"ctimestamp2"},"outputColumnNames:":["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col2 (type: int)"},"OperatorId:":"HASHTABLESINK_10"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"],"database:":"defaul
t","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)","columnExprMap:":{"_col0":"ctinyint","_col1":"csmallint","_col10":"cboolean1","_col11":"cboolean2","
_col2":"cint","_col3":"cbigint","_col4":"cfloat","_col5":"cdouble","_col6":"cstring1","_col7":"cstring2","_col8":"ctimestamp1","_col9":"ctimestamp2"},"outputColumnNames:":["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_12","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col10":"0:_col10","_col11":"0:_col11","_col12":"1:_col0","_col13":"1:_col1","_col14":"1:_col2","_col15":"1:_col3","_col16":"1:_col4","_col17":"1:_col5","_col18":"1:_col6","_col19":"1:_col7","_col2":"0:_col2","_col20":"1:_col8","_col21":"1:_col9","_col22":"1:_col10","_col23":"1:_col11","_col3":"0:_col3","_col4":"0:_col4","_col5":"0:_col5","_col6":"0:_col6","_col7":"0:_col7","_c
ol8":"0:_col8","_col9":"0:_col9"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col2 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 0:tinyint","col 1:smallint","col 2:int","col 3:bigint","col 4:float","col 5:double","col 6:string","col 7:string","col 8:timestamp","col 9:timestamp","col 10:boolean","col 11:boolean"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_c
ol14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_13","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_14"}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUD
FAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint, bigint, bigint, double, double, string, string, timestamp, timestamp, bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_15"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-4":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-4"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-4":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), c
string2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)","columnExprMap:":{"_col0":"ctinyint","_col1":"csmallint","_col10":"cboolean1","_col11":"cboolean2","_col2":"cint","_col3":"cbigint","_col4":"cfloat","_col5":"cdouble","_col6":"cstring1","_col7":"cstring2","_col8":"ctimestamp1","_col9":"ctimestamp2"},"outputColumnNames:":["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col2 (type: int)","1":"_col2 (type: int)"},"OperatorId:":"HASHTABLESINK_10"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"],"database:":"defaul
t","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)","columnExprMap:":{"_col0":"ctinyint","_col1":"csmallint","_col10":"cboolean1","_col11":"cboolean2","
_col2":"cint","_col3":"cbigint","_col4":"cfloat","_col5":"cdouble","_col6":"cstring1","_col7":"cstring2","_col8":"ctimestamp1","_col9":"ctimestamp2"},"outputColumnNames:":["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]"},"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_12","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0","_col1":"0:_col1","_col10":"0:_col10","_col11":"0:_col11","_col12":"1:_col0","_col13":"1:_col1","_col14":"1:_col2","_col15":"1:_col3","_col16":"1:_col4","_col17":"1:_col5","_col18":"1:_col6","_col19":"1:_col7","_col2":"0:_col2","_col20":"1:_col8","_col21":"1:_col9","_col22":"1:_col10","_col23":"1:_col11","_col3":"0:_col3","_col4":"0:_col4","_col5":"0:_col5","_col6":"0:_col6","_col7":"0:_col7","_c
ol8":"0:_col8","_col9":"0:_col9"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col2 (type: int)","1":"_col2 (type: int)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 0:tinyint","col 1:smallint","col 2:int","col 3:bigint","col 4:float","col 5:double","col 6:string","col 7:string","col 8:timestamp","col 9:timestamp","col 10:boolean","col 11:boolean"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_c
ol14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_13","children":{"File Output Operator":{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_14"}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"
false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[bigint, bigint, bigint, bigint, double, double, string, string, timestamp, timestamp, bigint, bigint]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_15"}}}}}}
PREHOOK: query: select *
from small_alltypesorc_b c
left outer join small_alltypesorc_b cd
@@ -339,7 +339,7 @@ from small_alltypesorc_b c
left outer join small_alltypesorc_b hd
on hd.ctinyint = c.ctinyint
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-4":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-4"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-4":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:hd":{"TableScan":{"alias:":"hd","columns:":["ctinyint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","columnExprMap:":{"_col0":"ctinyint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: tinyint)","1":"_col0
(type: tinyint)"},"OperatorId:":"HASHTABLESINK_10"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["ctinyint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","columnExprMap:":{"_col0":"ctinyint"},"outputColumnNames:":["_col0"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0]"},"Statistics:":"Num row
s: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_12","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:tinyint"],"bigTableValueExpressions:":["col 0:tinyint"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_13","children":{"File Output Operator":
{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_14"}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:st
ring","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_15"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-4":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-4"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-4":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:hd":{"TableScan":{"alias:":"hd","columns:":["ctinyint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","columnExprMap:":{"_col0":"ctinyint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: tinyint)","1":"_col0
(type: tinyint)"},"OperatorId:":"HASHTABLESINK_10"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["ctinyint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","TableScan Vectorization:":{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","columnExprMap:":{"_col0":"ctinyint"},"outputColumnNames:":["_col0"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0]"},"Statistics:":"Num row
s: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_12","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:tinyint"],"bigTableValueExpressions:":["col 0:tinyint"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_13","children":{"File Output Operator":
{"compressed:":"false","File Sink Vectorization:":{"className:":"VectorFileSinkOperator","native:":"false"},"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_14"}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:
double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_15"}}}}}}
PREHOOK: query: select c.ctinyint
from small_alltypesorc_b c
left outer join small_alltypesorc_b hd
@@ -782,7 +782,7 @@ left outer join small_alltypesorc_b hd
on hd.ctinyint = c.ctinyint
) t1
POSTHOOK: type: QUERY
-{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","columnExprMap:":{"_col0":"cint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_c
ol1 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["ctinyint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","columnExprMap:":{"_col0":"ctinyint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["ctinyint","cint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","TableScan Vectorization:":
{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), cint (type: int)","columnExprMap:":{"_col0":"ctinyint","_col1":"cint"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 2]"},"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col0 (type: int)"},"Map Join Vector
ization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 0:tinyint"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:tinyint"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hasht
able IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 36 Data size: 8082 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"c
lassName:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[]","featureSupportInUse:":"[]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0, 2]","dataColumns:":["ctinyint:tinyint","csmallint:smallint","
cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.
HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
+{"PLAN VECTORIZATION":{"enabled":true,"enabledConditionsMet":["hive.vectorized.execution.enabled IS true"]},"STAGE DEPENDENCIES":{"Stage-8":{"ROOT STAGE":"TRUE"},"Stage-3":{"DEPENDENT STAGES":"Stage-8"},"Stage-0":{"DEPENDENT STAGES":"Stage-3"}},"STAGE PLANS":{"Stage-8":{"Map Reduce Local Work":{"Alias -> Map Local Tables:":{"$hdt$_1:cd":{"Fetch Operator":{"limit:":"-1"}},"$hdt$_2:hd":{"Fetch Operator":{"limit:":"-1"}}},"Alias -> Map Local Operator Tree:":{"$hdt$_1:cd":{"TableScan":{"alias:":"cd","columns:":["cint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_2","children":{"Select Operator":{"expressions:":"cint (type: int)","columnExprMap:":{"_col0":"cint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_3","children":{"HashTable Sink Operator":{"keys:":{"0":"_c
ol1 (type: int)","1":"_col0 (type: int)"},"OperatorId:":"HASHTABLESINK_26"}}}}}},"$hdt$_2:hd":{"TableScan":{"alias:":"hd","columns:":["ctinyint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","isTempTable:":"false","OperatorId:":"TS_4","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint)","columnExprMap:":{"_col0":"ctinyint"},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_5","children":{"HashTable Sink Operator":{"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"OperatorId:":"HASHTABLESINK_24"}}}}}}}}},"Stage-3":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"c","columns:":["ctinyint","cint"],"database:":"default","Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","table:":"small_alltypesorc_b","TableScan Vectorization:":
{"native:":"true","vectorizationSchemaColumns:":"[0:ctinyint:tinyint, 1:csmallint:smallint, 2:cint:int, 3:cbigint:bigint, 4:cfloat:float, 5:cdouble:double, 6:cstring1:string, 7:cstring2:string, 8:ctimestamp1:timestamp, 9:ctimestamp2:timestamp, 10:cboolean1:boolean, 11:cboolean2:boolean, 12:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]"},"isTempTable:":"false","OperatorId:":"TS_0","children":{"Select Operator":{"expressions:":"ctinyint (type: tinyint), cint (type: int)","columnExprMap:":{"_col0":"ctinyint","_col1":"cint"},"outputColumnNames:":["_col0","_col1"],"Select Vectorization:":{"className:":"VectorSelectOperator","native:":"true","projectedOutputColumnNums:":"[0, 2]"},"Statistics:":"Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"SEL_28","children":{"Map Join Operator":{"columnExprMap:":{"_col0":"0:_col0"},"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col1 (type: int)","1":"_col0 (type: int)"},"Map Join Vector
ization:":{"bigTableKeyExpressions:":["col 2:int"],"bigTableValueExpressions:":["col 0:tinyint"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hashtable IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"outputColumnNames:":["_col0"],"Statistics:":"Num rows: 33 Data size: 7348 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_29","children":{"Map Join Operator":{"condition map:":[{"":"Left Outer Join 0 to 1"}],"keys:":{"0":"_col0 (type: tinyint)","1":"_col0 (type: tinyint)"},"Map Join Vectorization:":{"bigTableKeyExpressions:":["col 0:tinyint"],"className:":"VectorMapJoinOperator","native:":"false","nativeConditionsMet:":["hive.mapjoin.optimized.hasht
able IS true","hive.vectorized.execution.mapjoin.native.enabled IS true","One MapJoin Condition IS true","No nullsafe IS true","Small table vectorizes IS true","Outer Join has keys IS true","Optimized Table and Supports Key Types IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 36 Data size: 8082 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"MAPJOIN_30","children":{"Group By Operator":{"aggregations:":["count()"],"Group By Vectorization:":{"aggregators:":["VectorUDAFCountStar(*) -> bigint"],"className:":"VectorGroupByOperator","groupByMode:":"HASH","native:":"false","vectorProcessingMode:":"HASH","projectedOutputColumnNums:":"[0]"},"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_31","children":{"Reduce Output Operator":{"columnExprMap:":{"VALUE._col0":"_col0"},"sort order:":"","Reduce Sink Vectorization:":{"c
lassName:":"VectorReduceSinkOperator","native:":"false","nativeConditionsMet:":["hive.vectorized.execution.reducesink.new.enabled IS true","No PTF TopN IS true","No DISTINCT columns IS true","BinarySortableSerDe for keys IS true","LazyBinarySerDe for values IS true"],"nativeConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: bigint)","OperatorId:":"RS_32"}}}}}}}}}}}}],"Execution mode:":"vectorized","Map Vectorization:":{"enabled:":"true","enabledConditionsMet:":["hive.vectorized.use.vectorized.input.format IS true"],"inputFormatFeatureSupport:":"[DECIMAL_64]","featureSupportInUse:":"[DECIMAL_64]","inputFileFormats:":["org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"],"allNative:":"false","usesVectorUDFAdaptor:":"false","vectorized:":"true","rowBatchContext:":{"dataColumnCount:":"12","includeColumns:":"[0, 2]","dataColumns:":["ctinyint:tinyint","c
smallint:smallint","cint:int","cbigint:bigint","cfloat:float","cdouble:double","cstring1:string","cstring2:string","ctimestamp1:timestamp","ctimestamp2:timestamp","cboolean1:boolean","cboolean2:boolean"],"partitionColumnCount:":"0","scratchColumnTypeNames:":"[]"}},"Local Work:":{"Map Reduce Local Work":{}},"Reduce Vectorization:":{"enabled:":"false","enableConditionsMet:":["hive.vectorized.execution.reduce.enabled IS true"],"enableConditionsNotMet:":["hive.execution.engine mr IN [tez, spark] IS false"]},"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","OperatorId:":"GBY_15","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apach
e.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"},"OperatorId:":"FS_17"}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{"OperatorId:":"LIST_SINK_33"}}}}}}
PREHOOK: query: select count(*) from (select c.ctinyint
from small_alltypesorc_b c
left outer join small_alltypesorc_b cd
[06/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
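The hunks in this part show what the golden EXPLAIN output looks like once the ORC reader advertises DECIMAL_64: inputFormatFeatureSupport and featureSupportInUse switch from [] to [DECIMAL_64], decimal columns are reported as decimal(10,2)/DECIMAL_64, and ConvertDecimal64ToDecimal expressions appear where a full decimal value is still required. A minimal sketch of how such output can be reproduced follows; the table decimal64_demo and column d_col are illustrative placeholders, and hive.vectorized.input.format.supports.enabled is assumed to carry its default value of decimal_64.

-- Hedged sketch: reproduce the vectorization detail captured in these .q.out diffs.
-- decimal64_demo is a hypothetical ORC-backed table with a decimal(10,2) column d_col.
SET hive.vectorized.execution.enabled=true;
SET hive.vectorized.use.vectorized.input.format=true;
-- Assumed setting; its default ("decimal_64") lets the ORC input format advertise DECIMAL_64.
SET hive.vectorized.input.format.supports.enabled=decimal_64;

EXPLAIN VECTORIZATION DETAIL
SELECT d_col * 2
FROM decimal64_demo;
-- With the upgraded ORC reader, the Map Vectorization section of the plan is expected to list
-- inputFormatFeatureSupport: [DECIMAL_64] and featureSupportInUse: [DECIMAL_64],
-- matching the "+" lines in the diffs above and below.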
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_case_when_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_case_when_1.q.out b/ql/src/test/results/clientpositive/vector_case_when_1.q.out
index 66807ac..59d8133 100644
--- a/ql/src/test/results/clientpositive/vector_case_when_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_case_when_1.q.out
@@ -140,7 +140,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
@@ -182,7 +181,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
@@ -204,33 +202,19 @@ STAGE PLANS:
expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP '), date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'DELIVER
IN PERSON'), null, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 10000), datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 10000), null, datediff(l_receiptdate, l_commitdate)) (type: int), if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
+ File Output Operator
+ compressed: false
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
notVectorizedReason: SELECT operator: Unexpected hive type name void
vectorized: false
- Reduce Vectorization:
- enabled: false
- enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
- enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
@@ -277,7 +261,6 @@ PREHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
PREHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
@@ -320,112 +303,111 @@ POSTHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
POSTHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
quantity quantity_description quantity_description_2 quantity_description_3 expected_date field_1 field_2 field_3 field_4 field_5 field_6 field_7 field_8 field_9 field_10 field_11 field_12
-NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
-1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
1 Single Single Single 1994-01-31 0.0 0.0 0.05 0.05 0.05 0.05 0.05 0.05 1994-01-28 00:00:00 NULL -36 2009-01-01
-2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
-2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
-3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
-3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
-3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
-4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
-4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
-5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
-5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
-5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
-6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
-6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
-7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
-8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
-8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
-9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
11 Many Many NULL 1994-03-22 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1994-03-27 00:00:00 NULL 10 2009-01-01
12 Many Many NULL 1996-05-12 12655.998 12655.998 0.03 0.03 0.03 0.03 0.03 0.03 1996-06-03 00:00:00 NULL 82 2009-01-01
12 Many Many NULL 1997-02-01 12156.034800000001 12156.034800000001 0.05 NULL 0.05 0.00 0.05 0.00 1997-02-22 00:00:00 NULL 1 2009-01-01
+13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
13 Many Many NULL 1994-03-08 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-03-26 00:00:00 NULL 41 2009-01-01
13 Many Many NULL 1998-10-28 17554.68 17554.68 0.07 NULL 0.07 0.00 0.07 0.00 1998-11-06 00:00:00 NULL 53 2009-01-01
-13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
14 Many Many NULL 1995-01-04 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1995-01-27 00:00:00 NULL 66 2009-01-01
15 Many Many NULL 1994-11-05 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-11-20 00:00:00 NULL 81 2009-12-31
-17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
17 Many Many NULL 1994-07-07 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-07-03 00:00:00 NULL -4 2009-01-01
+17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
19 Many Many NULL 1993-05-19 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1993-05-25 00:00:00 NULL 81 2009-01-01
19 Many Many NULL 1994-02-05 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1994-02-06 00:00:00 NULL -11 2009-01-01
+2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
+2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
20 Many Many NULL 1998-07-02 32042.592 32042.592 0.01 NULL 0.01 0.00 0.01 0.00 1998-07-02 00:00:00 NULL 40 2009-01-01
-21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
21 Many Many NULL 1994-10-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-10-26 00:00:00 NULL 38 2009-01-01
-22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
+21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
22 Many Many NULL 1995-07-22 39353.82 39353.82 0.05 NULL 0.05 0.00 0.05 0.00 1995-07-19 00:00:00 NULL 45 2009-01-01
-23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
-23 Many Many NULL 1994-10-13 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-10-24 00:00:00 NULL 79 2009-12-31
+22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
23 Many Many NULL 1994-07-24 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-07-25 00:00:00 NULL 26 2009-01-01
-24 Many Many NULL 1996-04-04 20542.032 20542.032 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-01 00:00:00 NULL 18 2009-12-31
+23 Many Many NULL 1994-10-13 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-10-24 00:00:00 NULL 79 2009-12-31
+23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
24 Many Many NULL 1996-02-26 31762.584 31762.584 0.00 0.00 0.00 0.00 0.00 0.00 1996-03-18 00:00:00 NULL 75 2009-01-01
-25 Many Many NULL 1998-04-15 43064.1575 43064.1575 0.07 NULL 0.07 0.00 0.07 0.00 1998-04-11 00:00:00 NULL -11 2009-01-01
+24 Many Many NULL 1996-04-04 20542.032 20542.032 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-01 00:00:00 NULL 18 2009-12-31
25 Many Many NULL 1995-12-06 27263.995 27263.995 NULL NULL 0.00 0.00 0.00 0.00 1995-12-21 00:00:00 NULL -4 2009-01-01
-26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
-26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
-26 Many Many NULL 1994-10-21 0.0 0.0 0.08 NULL 0.08 0.00 0.08 0.00 1994-10-19 00:00:00 NULL 24 2009-01-01
+25 Many Many NULL 1998-04-15 43064.1575 43064.1575 0.07 NULL 0.07 0.00 0.07 0.00 1998-04-11 00:00:00 NULL -11 2009-01-01
26 Many Many NULL 1993-11-03 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1993-11-04 00:00:00 NULL -44 2009-01-01
+26 Many Many NULL 1994-10-21 0.0 0.0 0.08 NULL 0.08 0.00 0.08 0.00 1994-10-19 00:00:00 NULL 24 2009-01-01
+26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
+26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
27 Many Many NULL 1994-01-26 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-01-23 00:00:00 NULL 62 2009-01-01
27 Many Many NULL 1998-06-29 45590.2425 45590.2425 NULL NULL 0.00 0.00 0.00 0.00 1998-06-29 00:00:00 NULL 4 2009-01-01
-28 Many Many NULL 1995-10-28 44866.219999999994 44866.219999999994 0.08 0.08 0.08 0.08 0.08 0.08 1995-10-26 00:00:00 NULL 60 2009-01-01
-28 Many Many NULL 1994-12-29 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1995-01-16 00:00:00 NULL 83 2009-01-01
-28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
-28 Many Many NULL 1996-03-26 30855.6612 30855.6612 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-20 00:00:00 NULL 12 2009-12-31
28 Many Many NULL 1993-12-19 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-01-01 00:00:00 NULL -9 2009-01-01
+28 Many Many NULL 1994-12-29 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1995-01-16 00:00:00 NULL 83 2009-01-01
+28 Many Many NULL 1995-10-28 44866.219999999994 44866.219999999994 0.08 0.08 0.08 0.08 0.08 0.08 1995-10-26 00:00:00 NULL 60 2009-01-01
28 Many Many NULL 1996-02-06 45975.3616 45975.3616 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-28 00:00:00 NULL 66 2009-01-01
+28 Many Many NULL 1996-03-26 30855.6612 30855.6612 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-20 00:00:00 NULL 12 2009-12-31
+28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
29 Many Many NULL 1997-01-30 39341.806 39341.806 NULL NULL 0.00 0.00 0.00 0.00 1997-01-27 00:00:00 NULL 0 2009-01-01
+3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
+3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
+3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
30 Many Many NULL 1994-06-08 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-06-22 00:00:00 NULL 24 2009-01-01
30 Many Many NULL 1996-01-15 29770.173 29770.173 NULL NULL 0.00 0.00 0.00 0.00 1996-01-18 00:00:00 NULL 35 2009-12-31
30 Many Many NULL 1998-08-16 44561.46 44561.46 0.06 NULL 0.06 0.00 0.06 0.00 1998-08-14 00:00:00 NULL 34 2009-12-31
-31 Many Many NULL 1994-02-24 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-02-20 00:00:00 NULL -19 2009-01-01
31 Many Many NULL 1993-11-03 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1993-11-08 00:00:00 NULL -41 2009-01-01
-32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
+31 Many Many NULL 1994-02-24 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-02-20 00:00:00 NULL -19 2009-01-01
32 Many Many NULL 1993-12-14 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1993-12-28 00:00:00 NULL -7 2009-12-31
32 Many Many NULL 1994-08-29 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-08-31 00:00:00 NULL 14 2009-01-01
-32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
+32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
32 Many Many NULL 1996-02-04 46146.7488 46146.7488 NULL NULL 0.00 0.00 0.00 0.00 1996-02-03 00:00:00 NULL -4 2009-01-01
+32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
33 Many Many NULL 1998-04-17 54174.12 54174.12 0.01 NULL 0.01 0.00 0.01 0.00 1998-04-15 00:00:00 NULL 26 2009-01-01
-34 Many Many NULL 1998-03-10 56487.763199999994 56487.763199999994 NULL NULL 0.00 0.00 0.00 0.00 1998-03-30 00:00:00 NULL -23 2009-01-01
-34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
34 Many Many NULL 1995-11-13 60586.5448 60586.5448 0.06 NULL 0.06 0.00 0.06 0.00 1995-11-26 00:00:00 NULL -50 2009-01-01
+34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
+34 Many Many NULL 1998-03-10 56487.763199999994 56487.763199999994 NULL NULL 0.00 0.00 0.00 0.00 1998-03-30 00:00:00 NULL -23 2009-01-01
35 Many Many NULL 1996-01-21 40475.225 40475.225 0.03 0.03 0.03 0.03 0.03 0.03 1996-01-22 00:00:00 NULL -32 2009-01-01
36 Many Many NULL 1996-04-17 41844.6756 41844.6756 0.06 0.06 0.06 0.06 0.06 0.06 1996-04-20 00:00:00 NULL 52 2009-01-01
-37 Many Many NULL 1994-02-18 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-02-21 00:00:00 NULL -23 2009-01-01
-37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
37 Many Many NULL 1992-05-02 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1992-05-02 00:00:00 NULL -13 2009-01-01
-38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
+37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
+37 Many Many NULL 1994-02-18 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-02-21 00:00:00 NULL -23 2009-01-01
38 Many Many NULL 1996-02-16 68028.3144 68028.3144 NULL NULL 0.00 0.00 0.00 0.00 1996-02-18 00:00:00 NULL -6 2009-01-01
-39 Many Many NULL 1998-02-03 45146.01 45146.01 NULL NULL 0.00 0.00 0.00 0.00 1998-02-18 00:00:00 NULL -48 2009-01-01
+38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
39 Many Many NULL 1992-07-07 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1992-07-28 00:00:00 NULL -21 2009-01-01
-40 Many Many NULL 1996-12-13 51224.736 51224.736 0.05 NULL 0.05 0.00 0.05 0.00 1997-01-01 00:00:00 NULL 71 2009-01-01
+39 Many Many NULL 1998-02-03 45146.01 45146.01 NULL NULL 0.00 0.00 0.00 0.00 1998-02-18 00:00:00 NULL -48 2009-01-01
+4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
+4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
40 Many Many NULL 1992-07-26 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1992-08-15 00:00:00 NULL 14 2009-01-01
-41 Many Many NULL 1998-07-04 47989.6144 47989.6144 0.08 NULL 0.08 0.00 0.08 0.00 1998-07-06 00:00:00 NULL 9 2009-01-01
-41 Many Many NULL 1994-02-26 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-03-18 00:00:00 NULL 17 2009-01-01
+40 Many Many NULL 1996-12-13 51224.736 51224.736 0.05 NULL 0.05 0.00 0.05 0.00 1997-01-01 00:00:00 NULL 71 2009-01-01
41 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-11 00:00:00 NULL -74 2009-01-01
+41 Many Many NULL 1994-02-26 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-03-18 00:00:00 NULL 17 2009-01-01
+41 Many Many NULL 1998-07-04 47989.6144 47989.6144 0.08 NULL 0.08 0.00 0.08 0.00 1998-07-06 00:00:00 NULL 9 2009-01-01
42 Many Many NULL 1994-08-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-28 00:00:00 NULL 33 2009-12-31
42 Many Many NULL 1996-02-13 68289.9672 68289.9672 0.00 NULL 0.00 0.00 0.00 0.00 1996-02-23 00:00:00 NULL 33 2009-01-01
-43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
43 Many Many NULL 1992-07-15 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1992-08-02 00:00:00 NULL 27 2009-01-01
-44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
-44 Many Many NULL 1996-10-04 80882.4192 80882.4192 0.02 NULL 0.02 0.00 0.02 0.00 1996-09-30 00:00:00 NULL -48 2009-01-01
+43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
44 Many Many NULL 1995-09-02 75106.658 75106.658 NULL NULL 0.00 0.00 0.00 0.00 1995-09-14 00:00:00 NULL 25 2009-01-01
+44 Many Many NULL 1996-10-04 80882.4192 80882.4192 0.02 NULL 0.02 0.00 0.02 0.00 1996-09-30 00:00:00 NULL -48 2009-01-01
44 Many Many NULL 1996-11-19 48941.692800000004 48941.692800000004 0.06 NULL 0.06 0.00 0.06 0.00 1996-12-12 00:00:00 NULL -3 2009-01-01
-45 Many Many NULL 1998-03-05 61489.35 61489.35 NULL NULL 0.00 0.00 0.00 0.00 1998-03-24 00:00:00 NULL 4 2009-01-01
+44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
45 Many Many NULL 1994-02-07 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-02-23 00:00:00 NULL 50 2009-01-01
+45 Many Many NULL 1998-03-05 61489.35 61489.35 NULL NULL 0.00 0.00 0.00 0.00 1998-03-24 00:00:00 NULL 4 2009-01-01
46 Many Many NULL 1996-01-20 73475.892 73475.892 0.07 NULL 0.07 0.00 0.07 0.00 1996-02-03 00:00:00 NULL -53 2009-01-01
+46 Many Many NULL 1996-10-01 77781.4092 77781.4092 NULL NULL 0.00 0.00 0.00 0.00 1996-10-26 00:00:00 NULL -54 2009-01-01
46 Many Many NULL 1998-07-01 56583.5144 56583.5144 0.05 NULL 0.05 0.00 0.05 0.00 1998-07-05 00:00:00 NULL 28 2009-01-01
46 Many Many NULL 1998-08-18 84565.5168 84565.5168 0.05 NULL 0.05 0.00 0.05 0.00 1998-08-29 00:00:00 NULL 52 2009-01-01
-46 Many Many NULL 1996-10-01 77781.4092 77781.4092 NULL NULL 0.00 0.00 0.00 0.00 1996-10-26 00:00:00 NULL -54 2009-01-01
48 Many Many NULL 1994-08-22 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-09-08 00:00:00 NULL 28 2009-01-01
49 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-24 00:00:00 NULL -26 2009-12-31
+5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
+5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
+5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
50 Many Many NULL 1994-08-13 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-26 00:00:00 NULL -48 2009-12-31
+6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
+6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
+7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
+8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
+8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
+9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
L_QUANTITY as Quantity,
@@ -466,7 +448,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
@@ -508,7 +489,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
@@ -528,7 +508,7 @@ STAGE PLANS:
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2), 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2)/DECIMAL_64, 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP '), date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'DELIVER
IN PERSON'), null, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 10000), datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 10000), null, datediff(l_receiptdate, l_commitdate)) (type: int), if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
@@ -536,24 +516,24 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [4, 22, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 38, 40, 43, 44]
- selectExpressions: IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 21:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 22:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 21:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprStringScalarStringScalar(col 20:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean) -> 21:string) -> 22:string) -> 21:string) -> 22:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 24:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val
Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprColumnNull(col 20:boolean, col 21:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean, ConstantVectorExpression(val Many) -> 21:string) -> 23:string) -> 24:string) -> 23:string) -> 24:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprNullNull(null, null) -> 23:string) -> 25:string) -> 23:string) -> 25:string, IfExprLongColumnLongColumn(col 17:boolean, col 18:date, col 19:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 17:boolean, VectorUDFDateAddColScalar(co
l 10:date, val 10) -> 18:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 19:date) -> 26:date, IfExprDoubleColumnLongScalar(col 17:boolean, col 28:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 27:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 27:double) -> 28:double) -> 27:double, IfExprDoubleColumnDoubleScalar(col 17:boolean, col 29:double, val 0.0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 28:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 28:double) -> 29:double) -> 28:double, IfExprNullColumn(col 17:boolean, null, col 7)(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 17:boolean, col 7:decimal(10,2)) -> 30:decimal(10,2), IfExprColumnNull(co
l 18:boolean, col 7:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 18:boolean, col 7:decimal(10,2)) -> 31:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 32:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 33:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20))
-> 23:string) -> 19:boolean) -> 34:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 35:decimal(10,2), IfExprTimestampColumnColumn(col 19:boolean, col 36:timestampcol 37:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 19:boolean, CastDateToTimestamp(col 12:date) -> 36:timestamp, CastDateToTimestamp(col 11:date) -> 37:timestamp) -> 38:timestamp, IfExprColumnNull(col 19:boolean, col 39:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 19:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 39:int) -> 40:int, IfExprNullColumn(col 41:boolean, null, col 42)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 41:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 42:int) -> 43:int, IfExprLongScalarLongScalar(col
45:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 44:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 44:int) -> 45:boolean) -> 44:date
+ selectExpressions: IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 21:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 22:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 21:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprStringScalarStringScalar(col 20:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean) -> 21:string) -> 22:string) -> 21:string) -> 22:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 24:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val
Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprColumnNull(col 20:boolean, col 21:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean, ConstantVectorExpression(val Many) -> 21:string) -> 23:string) -> 24:string) -> 23:string) -> 24:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprNullNull(null, null) -> 23:string) -> 25:string) -> 23:string) -> 25:string, IfExprLongColumnLongColumn(col 17:boolean, col 18:date, col 19:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 17:boolean, VectorUDFDateAddColScalar(co
l 10:date, val 10) -> 18:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 19:date) -> 26:date, IfExprDoubleColumnLongScalar(col 17:boolean, col 28:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 27:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 27:double) -> 28:double) -> 27:double, IfExprDoubleColumnDoubleScalar(col 17:boolean, col 29:double, val 0.0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 28:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 28:double) -> 29:double) -> 28:double, IfExprNullColumn(col 17:boolean, null, col 46)(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 17:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_64) ->
46:decimal(10,2)) -> 30:decimal(10,2), IfExprColumnNull(col 18:boolean, col 47:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 18:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_64) -> 47:decimal(10,2)) -> 31:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 32:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 33:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: String
GroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 34:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 35:decimal(10,2), IfExprTimestampColumnColumn(col 19:boolean, col 36:timestampcol 37:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 19:boolean, CastDateToTimestamp(col 12:date) -> 36:timestamp, CastDateToTimestamp(col 11:date) -> 37:timestamp) -> 38:timestamp, IfExprColumnNull(col 19:boolean, col 39:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 19:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 39:int) -> 40:int, IfExprNullColumn(col 41:boolean, null, col 42)(children: LongColGreaterLongScalar(col 2:int, val 10000)
-> 41:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 42:int) -> 43:int, IfExprLongScalarLongScalar(col 45:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 44:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 44:int) -> 45:boolean) -> 44:date
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Reduce Sink Vectorization:
- className: VectorReduceSinkOperator
+ File Output Operator
+ compressed: false
+ File Sink Vectorization:
+ className: VectorFileSinkOperator
native: false
- nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
- nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Execution mode: vectorized
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
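
Note: the inputFormatFeatureSupport / featureSupportInUse switch from [] to [DECIMAL_64] records that, after the ORC reader upgrade, decimal columns whose values fit in a 64-bit unscaled long (here l_tax decimal(10,2)) can be handed to the vectorizer as scaled longs instead of full decimal objects; expressions that still need the generic form go through ConvertDecimal64ToDecimal, which is why the plans now print decimal(10,2)/DECIMAL_64 columns and extra decimal(10,2) scratch columns. Conceptually the representation is just an unscaled integer plus a fixed scale; a minimal Python sketch of the idea (illustrative only, not Hive code):

    # Illustrative sketch of the DECIMAL_64 idea, not Hive code: a decimal(10,2)
    # value is carried as an unscaled 64-bit integer plus a fixed scale, and
    # ConvertDecimal64ToDecimal conceptually rebuilds the full decimal from it.
    from decimal import Decimal

    SCALE = 2  # decimal(10,2)

    def to_decimal64(text):
        # "12.34" -> 1234 (easily fits a signed 64-bit long at precision 10)
        return int(Decimal(text).scaleb(SCALE))

    def convert_decimal64_to_decimal(unscaled):
        # 1234 -> Decimal("12.34")
        return Decimal(unscaled).scaleb(-SCALE)

    assert convert_decimal64_to_decimal(to_decimal64("12.34")) == Decimal("12.34")
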
@@ -561,25 +541,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 16
includeColumns: [1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14]
- dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2), l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
+ dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2)/DECIMAL_64, l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, bigint, bigint, bigint, string, string, string, string, string, bigint, double, double, double, decimal(10,2), decimal(10,2), decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint]
- Reduce Vectorization:
- enabled: false
- enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
- enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ scratchColumnTypeNames: [bigint, bigint, bigint, bigint, string, string, string, string, string, bigint, double, double, double, decimal(10,2), decimal(10,2), decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint, decimal(10,2), decimal(10,2)]
Stage: Stage-0
Fetch Operator
@@ -626,7 +590,6 @@ PREHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
PREHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
@@ -669,112 +632,111 @@ POSTHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
POSTHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
quantity quantity_description quantity_description_2 quantity_description_3 expected_date field_1 field_2 field_3 field_4 field_5 field_6 field_7 field_8 field_9 field_10 field_11 field_12
-NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
-1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
1 Single Single Single 1994-01-31 0.0 0.0 0.05 0.05 0.05 0.05 0.05 0.05 1994-01-28 00:00:00 NULL -36 2009-01-01
-2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
-2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
-3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
-3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
-3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
-4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
-4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
-5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
-5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
-5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
-6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
-6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
-7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
-8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
-8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
-9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
11 Many Many NULL 1994-03-22 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1994-03-27 00:00:00 NULL 10 2009-01-01
12 Many Many NULL 1996-05-12 12655.998 12655.998 0.03 0.03 0.03 0.03 0.03 0.03 1996-06-03 00:00:00 NULL 82 2009-01-01
12 Many Many NULL 1997-02-01 12156.034800000001 12156.034800000001 0.05 NULL 0.05 0.00 0.05 0.00 1997-02-22 00:00:00 NULL 1 2009-01-01
+13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
13 Many Many NULL 1994-03-08 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-03-26 00:00:00 NULL 41 2009-01-01
13 Many Many NULL 1998-10-28 17554.68 17554.68 0.07 NULL 0.07 0.00 0.07 0.00 1998-11-06 00:00:00 NULL 53 2009-01-01
-13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
14 Many Many NULL 1995-01-04 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1995-01-27 00:00:00 NULL 66 2009-01-01
15 Many Many NULL 1994-11-05 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-11-20 00:00:00 NULL 81 2009-12-31
-17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
17 Many Many NULL 1994-07-07 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-07-03 00:00:00 NULL -4 2009-01-01
+17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
19 Many Many NULL 1993-05-19 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1993-05-25 00:00:00 NULL 81 2009-01-01
19 Many Many NULL 1994-02-05 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1994-02-06 00:00:00 NULL -11 2009-01-01
+2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
+2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
20 Many Many NULL 1998-07-02 32042.592 32042.592 0.01 NULL 0.01 0.00 0.01 0.00 1998-07-02 00:00:00 NULL 40 2009-01-01
-21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
21 Many Many NULL 1994-10-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-10-26 00:00:00 NULL 38 2009-01-01
-22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
+21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
22 Many Many NULL 1995-07-22 39353.82 39353.82 0.05 NULL 0.05 0.00 0.05 0.00 1995-07-19 00:00:00 NULL 45 2009-01-01
-23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
-23 Many Many NULL 1994-10-13 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-10-24 00:00:00 NULL 79 2009-12-31
+22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
23 Many Many NULL 1994-07-24 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-07-25 00:00:00 NULL 26 2009-01-01
-24 Many Many NULL 1996-04-04 20542.032 20542.032 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-01 00:00:00 NULL 18 2009-12-31
+23 Many Many NULL 1994-10-13 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-10-24 00:00:00 NULL 79 2009-12-31
+23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
24 Many Many NULL 1996-02-26 31762.584 31762.584 0.00 0.00 0.00 0.00 0.00 0.00 1996-03-18 00:00:00 NULL 75 2009-01-01
-25 Many Many NULL 1998-04-15 43064.1575 43064.1575 0.07 NULL 0.07 0.00 0.07 0.00 1998-04-11 00:00:00 NULL -11 2009-01-01
+24 Many Many NULL 1996-04-04 20542.032 20542.032 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-01 00:00:00 NULL 18 2009-12-31
25 Many Many NULL 1995-12-06 27263.995 27263.995 NULL NULL 0.00 0.00 0.00 0.00 1995-12-21 00:00:00 NULL -4 2009-01-01
-26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
-26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
-26 Many Many NULL 1994-10-21 0.0 0.0 0.08 NULL 0.08 0.00 0.08 0.00 1994-10-19 00:00:00 NULL 24 2009-01-01
+25 Many Many NULL 1998-04-15 43064.1575 43064.1575 0.07 NULL 0.07 0.00 0.07 0.00 1998-04-11 00:00:00 NULL -11 2009-01-01
26 Many Many NULL 1993-11-03 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1993-11-04 00:00:00 NULL -44 2009-01-01
+26 Many Many NULL 1994-10-21 0.0 0.0 0.08 NULL 0.08 0.00 0.08 0.00 1994-10-19 00:00:00 NULL 24 2009-01-01
+26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
+26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
27 Many Many NULL 1994-01-26 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-01-23 00:00:00 NULL 62 2009-01-01
27 Many Many NULL 1998-06-29 45590.2425 45590.2425 NULL NULL 0.00 0.00 0.00 0.00 1998-06-29 00:00:00 NULL 4 2009-01-01
-28 Many Many NULL 1995-10-28 44866.219999999994 44866.219999999994 0.08 0.08 0.08 0.08 0.08 0.08 1995-10-26 00:00:00 NULL 60 2009-01-01
-28 Many Many NULL 1994-12-29 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1995-01-16 00:00:00 NULL 83 2009-01-01
-28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
-28 Many Many NULL 1996-03-26 30855.6612 30855.6612 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-20 00:00:00 NULL 12 2009-12-31
28 Many Many NULL 1993-12-19 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-01-01 00:00:00 NULL -9 2009-01-01
+28 Many Many NULL 1994-12-29 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1995-01-16 00:00:00 NULL 83 2009-01-01
+28 Many Many NULL 1995-10-28 44866.219999999994 44866.219999999994 0.08 0.08 0.08 0.08 0.08 0.08 1995-10-26 00:00:00 NULL 60 2009-01-01
28 Many Many NULL 1996-02-06 45975.3616 45975.3616 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-28 00:00:00 NULL 66 2009-01-01
+28 Many Many NULL 1996-03-26 30855.6612 30855.6612 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-20 00:00:00 NULL 12 2009-12-31
+28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
29 Many Many NULL 1997-01-30 39341.806 39341.806 NULL NULL 0.00 0.00 0.00 0.00 1997-01-27 00:00:00 NULL 0 2009-01-01
+3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
+3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
+3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
30 Many Many NULL 1994-06-08 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-06-22 00:00:00 NULL 24 2009-01-01
30 Many Many NULL 1996-01-15 29770.173 29770.173 NULL NULL 0.00 0.00 0.00 0.00 1996-01-18 00:00:00 NULL 35 2009-12-31
30 Many Many NULL 1998-08-16 44561.46 44561.46 0.06 NULL 0.06 0.00 0.06 0.00 1998-08-14 00:00:00 NULL 34 2009-12-31
-31 Many Many NULL 1994-02-24 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-02-20 00:00:00 NULL -19 2009-01-01
31 Many Many NULL 1993-11-03 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1993-11-08 00:00:00 NULL -41 2009-01-01
-32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
+31 Many Many NULL 1994-02-24 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-02-20 00:00:00 NULL -19 2009-01-01
32 Many Many NULL 1993-12-14 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1993-12-28 00:00:00 NULL -7 2009-12-31
32 Many Many NULL 1994-08-29 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-08-31 00:00:00 NULL 14 2009-01-01
-32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
+32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
32 Many Many NULL 1996-02-04 46146.7488 46146.7488 NULL NULL 0.00 0.00 0.00 0.00 1996-02-03 00:00:00 NULL -4 2009-01-01
+32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
33 Many Many NULL 1998-04-17 54174.12 54174.12 0.01 NULL 0.01 0.00 0.01 0.00 1998-04-15 00:00:00 NULL 26 2009-01-01
-34 Many Many NULL 1998-03-10 56487.763199999994 56487.763199999994 NULL NULL 0.00 0.00 0.00 0.00 1998-03-30 00:00:00 NULL -23 2009-01-01
-34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
34 Many Many NULL 1995-11-13 60586.5448 60586.5448 0.06 NULL 0.06 0.00 0.06 0.00 1995-11-26 00:00:00 NULL -50 2009-01-01
+34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
+34 Many Many NULL 1998-03-10 56487.763199999994 56487.763199999994 NULL NULL 0.00 0.00 0.00 0.00 1998-03-30 00:00:00 NULL -23 2009-01-01
35 Many Many NULL 1996-01-21 40475.225 40475.225 0.03 0.03 0.03 0.03 0.03 0.03 1996-01-22 00:00:00 NULL -32 2009-01-01
36 Many Many NULL 1996-04-17 41844.6756 41844.6756 0.06 0.06 0.06 0.06 0.06 0.06 1996-04-20 00:00:00 NULL 52 2009-01-01
-37 Many Many NULL 1994-02-18 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-02-21 00:00:00 NULL -23 2009-01-01
-37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
37 Many Many NULL 1992-05-02 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1992-05-02 00:00:00 NULL -13 2009-01-01
-38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
+37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
+37 Many Many NULL 1994-02-18 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-02-21 00:00:00 NULL -23 2009-01-01
38 Many Many NULL 1996-02-16 68028.3144 68028.3144 NULL NULL 0.00 0.00 0.00 0.00 1996-02-18 00:00:00 NULL -6 2009-01-01
-39 Many Many NULL 1998-02-03 45146.01 45146.01 NULL NULL 0.00 0.00 0.00 0.00 1998-02-18 00:00:00 NULL -48 2009-01-01
+38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
39 Many Many NULL 1992-07-07 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1992-07-28 00:00:00 NULL -21 2009-01-01
-40 Many Many NULL 1996-12-13 51224.736 51224.736 0.05 NULL 0.05 0.00 0.05 0.00 1997-01-01 00:00:00 NULL 71 2009-01-01
+39 Many Many NULL 1998-02-03 45146.01 45146.01 NULL NULL 0.00 0.00 0.00 0.00 1998-02-18 00:00:00 NULL -48 2009-01-01
+4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
+4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
40 Many Many NULL 1992-07-26 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1992-08-15 00:00:00 NULL 14 2009-01-01
-41 Many Many NULL 1998-07-04 47989.6144 47989.6144 0.08 NULL 0.08 0.00 0.08 0.00 1998-07-06 00:00:00 NULL 9 2009-01-01
-41 Many Many NULL 1994-02-26 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-03-18 00:00:00 NULL 17 2009-01-01
+40 Many Many NULL 1996-12-13 51224.736 51224.736 0.05 NULL 0.05 0.00 0.05 0.00 1997-01-01 00:00:00 NULL 71 2009-01-01
41 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-11 00:00:00 NULL -74 2009-01-01
+41 Many Many NULL 1994-02-26 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-03-18 00:00:00 NULL 17 2009-01-01
+41 Many Many NULL 1998-07-04 47989.6144 47989.6144 0.08 NULL 0.08 0.00 0.08 0.00 1998-07-06 00:00:00 NULL 9 2009-01-01
42 Many Many NULL 1994-08-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-28 00:00:00 NULL 33 2009-12-31
42 Many Many NULL 1996-02-13 68289.9672 68289.9672 0.00 NULL 0.00 0.00 0.00 0.00 1996-02-23 00:00:00 NULL 33 2009-01-01
-43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
43 Many Many NULL 1992-07-15 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1992-08-02 00:00:00 NULL 27 2009-01-01
-44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
-44 Many Many NULL 1996-10-04 80882.4192 80882.4192 0.02 NULL 0.02 0.00 0.02 0.00 1996-09-30 00:00:00 NULL -48 2009-01-01
+43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
44 Many Many NULL 1995-09-02 75106.658 75106.658 NULL NULL 0.00 0.00 0.00 0.00 1995-09-14 00:00:00 NULL 25 2009-01-01
+44 Many Many NULL 1996-10-04 80882.4192 80882.4192 0.02 NULL 0.02 0.00 0.02 0.00 1996-09-30 00:00:00 NULL -48 2009-01-01
44 Many Many NULL 1996-11-19 48941.692800000004 48941.692800000004 0.06 NULL 0.06 0.00 0.06 0.00 1996-12-12 00:00:00 NULL -3 2009-01-01
-45 Many Many NULL 1998-03-05 61489.35 61489.35 NULL NULL 0.00 0.00 0.00 0.00 1998-03-24 00:00:00 NULL 4 2009-01-01
+44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
45 Many Many NULL 1994-02-07 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-02-23 00:00:00 NULL 50 2009-01-01
+45 Many Many NULL 1998-03-05 61489.35 61489.35 NULL NULL 0.00 0.00 0.00 0.00 1998-03-24 00:00:00 NULL 4 2009-01-01
46 Many Many NULL 1996-01-20 73475.892 73475.892 0.07 NULL 0.07 0.00 0.07 0.00 1996-02-03 00:00:00 NULL -53 2009-01-01
+46 Many Many NULL 1996-10-01 77781.4092 77781.4092 NULL NULL 0.00 0.00 0.00 0.00 1996-10-26 00:00:00 NULL -54 2009-01-01
46 Many Many NULL 1998-07-01 56583.5144 56583.5144 0.05 NULL 0.05 0.00 0.05 0.00 1998-07-05 00:00:00 NULL 28 2009-01-01
46 Many Many NULL 1998-08-18 84565.5168 84565.5168 0.05 NULL 0.05 0.00 0.05 0.00 1998-08-29 00:00:00 NULL 52 2009-01-01
-46 Many Many NULL 1996-10-01 77781.4092 77781.4092 NULL NULL 0.00 0.00 0.00 0.00 1996-10-26 00:00:00 NULL -54 2009-01-01
48 Many Many NULL 1994-08-22 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-09-08 00:00:00 NULL 28 2009-01-01
49 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-24 00:00:00 NULL -26 2009-12-31
+5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
+5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
+5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
50 Many Many NULL 1994-08-13 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-26 00:00:00 NULL -48 2009-12-31
+6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
+6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
+7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
+8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
+8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
+9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
L_QUANTITY as Quantity,
@@ -815,7 +777,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
@@ -857,7 +818,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
@@ -877,7 +837,7 @@ STAGE PLANS:
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2), 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2)/DECIMAL_64, 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP '), date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'DELIVER
IN PERSON'), null, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 10000), datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 10000), null, datediff(l_receiptdate, l_commitdate)) (type: int), if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
@@ -885,24 +845,24 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [4, 27, 38, 48, 52, 54, 60, 63, 65, 67, 68, 69, 70, 73, 76, 79, 80]
- selectExpressions: IfExprColumnCondExpr(col 17:boolean, col 18:stringcol 26:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, ConstantVectorExpression(val Single) -> 18:string, IfExprColumnCondExpr(col 19:boolean, col 20:stringcol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 19:boolean, ConstantVectorExpression(val Two) -> 20:string, IfExprColumnCondExpr(col 21:boolean, col 22:stringcol 24:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 21:boolean, ConstantVectorExpression(val Some) -> 22:string, IfExprStringScalarStringScalar(col 23:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 23:boolean) -> 24:string) -> 25:string) -> 26:string) -> 27:string, IfExprColumnCondExpr(col 23:boolean, col 28:stringcol 37:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 23:boolean, ConstantVectorExpression(val Single) -> 28:string, IfExprColumnCondExpr(col 29:boolean, col
30:stringcol 36:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 29:boolean, ConstantVectorExpression(val Two) -> 30:string, IfExprColumnCondExpr(col 31:boolean, col 32:stringcol 35:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 31:boolean, ConstantVectorExpression(val Some) -> 32:string, IfExprColumnNull(col 33:boolean, col 34:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 33:boolean, ConstantVectorExpression(val Many) -> 34:string) -> 35:string) -> 36:string) -> 37:string) -> 38:string, IfExprColumnCondExpr(col 39:boolean, col 40:stringcol 47:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 39:boolean, ConstantVectorExpression(val Single) -> 40:string, IfExprColumnCondExpr(col 41:boolean, col 42:stringcol 46:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 41:boolean, ConstantVectorExpression(val Two) -> 42:string, IfExprColumnCondExpr(col 43:boolean, col 44:stringcol 45:string)(children: LongColLessLongS
calar(col 4:int, val 10) -> 43:boolean, ConstantVectorExpression(val Some) -> 44:string, IfExprNullNull(null, null) -> 45:string) -> 46:string) -> 47:string) -> 48:string, IfExprCondExprCondExpr(col 49:boolean, col 50:datecol 51:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 49:boolean, VectorUDFDateAddColScalar(col 10:date, val 10) -> 50:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 51:date) -> 52:date, IfExprDoubleColumnLongScalar(col 57:boolean, col 58:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 54:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 54:double) -> 58:double) -> 54:double, IfExprCondExprColumn(col 57:boolean, col 59:double, col 58:double)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 58:double)(children: DoubleScalarSubtractDoubleC
olumn(val 1.0, col 6:double) -> 58:double) -> 59:double, ConstantVectorExpression(val 0.0) -> 58:double) -> 60:double, IfExprNullColumn(col 62:boolean, null, col 7)(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 62:boolean, col 7:decimal(10,2)) -> 63:decimal(10,2), IfExprColumnNull(col 64:boolean, col 7:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 64:boolean, col 7:decimal(10,2)) -> 65:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 67:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: St
ringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 68:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 69:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 70:decimal(10,2), IfExprCondExprCondExpr(col 66:boolean, col 71:timestampcol 72:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 66:boolean, CastDateToTimestamp(col 12:date) -> 71:timestamp, CastDateToTimestamp(col 11:date) -> 72:timestamp) -> 73:timestamp, IfExprCondExprNull(col 74:boolean
, col 75:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 74:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 75:int) -> 76:int, IfExprNullCondExpr(col 77:boolean, null, col 78:int)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 77:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 78:int) -> 79:int, IfExprLongScalarLongScalar(col 81:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 80:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 80:int) -> 81:boolean) -> 80:date
+ selectExpressions: IfExprColumnCondExpr(col 17:boolean, col 18:stringcol 26:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, ConstantVectorExpression(val Single) -> 18:string, IfExprColumnCondExpr(col 19:boolean, col 20:stringcol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 19:boolean, ConstantVectorExpression(val Two) -> 20:string, IfExprColumnCondExpr(col 21:boolean, col 22:stringcol 24:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 21:boolean, ConstantVectorExpression(val Some) -> 22:string, IfExprStringScalarStringScalar(col 23:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 23:boolean) -> 24:string) -> 25:string) -> 26:string) -> 27:string, IfExprColumnCondExpr(col 23:boolean, col 28:stringcol 37:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 23:boolean, ConstantVectorExpression(val Single) -> 28:string, IfExprColumnCondExpr(col 29:boolean, col
30:stringcol 36:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 29:boolean, ConstantVectorExpression(val Two) -> 30:string, IfExprColumnCondExpr(col 31:boolean, col 32:stringcol 35:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 31:boolean, ConstantVectorExpression(val Some) -> 32:string, IfExprColumnNull(col 33:boolean, col 34:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 33:boolean, ConstantVectorExpression(val Many) -> 34:string) -> 35:string) -> 36:string) -> 37:string) -> 38:string, IfExprColumnCondExpr(col 39:boolean, col 40:stringcol 47:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 39:boolean, ConstantVectorExpression(val Single) -> 40:string, IfExprColumnCondExpr(col 41:boolean, col 42:stringcol 46:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 41:boolean, ConstantVectorExpression(val Two) -> 42:string, IfExprColumnCondExpr(col 43:boolean, col 44:stringcol 45:string)(children: LongColLessLongS
calar(col 4:int, val 10) -> 43:boolean, ConstantVectorExpression(val Some) -> 44:string, IfExprNullNull(null, null) -> 45:string) -> 46:string) -> 47:string) -> 48:string, IfExprCondExprCondExpr(col 49:boolean, col 50:datecol 51:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 49:boolean, VectorUDFDateAddColScalar(col 10:date, val 10) -> 50:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 51:date) -> 52:date, IfExprDoubleColumnLongScalar(col 57:boolean, col 58:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 54:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 54:double) -> 58:double) -> 54:double, IfExprCondExprColumn(col 57:boolean, col 59:double, col 58:double)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 58:double)(children: DoubleScalarSubtractDoubleC
olumn(val 1.0, col 6:double) -> 58:double) -> 59:double, ConstantVectorExpression(val 0.0) -> 58:double) -> 60:double, IfExprNullColumn(col 62:boolean, null, col 82)(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 62:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_64) -> 82:decimal(10,2)) -> 63:decimal(10,2), IfExprColumnNull(col 64:boolean, col 83:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 64:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_64) -> 83:decimal(10,2)) -> 65:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean
) -> 67:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 68:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 69:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 70:decimal(10,2), IfExprCondExprCondExpr(col 66:boolean, col 71:timestampcol 72:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 66:boolean, CastDateToTimestamp(col 12:date)
-> 71:timestamp, CastDateToTimestamp(col 11:date) -> 72:timestamp) -> 73:timestamp, IfExprCondExprNull(col 74:boolean, col 75:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 74:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 75:int) -> 76:int, IfExprNullCondExpr(col 77:boolean, null, col 78:int)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 77:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 78:int) -> 79:int, IfExprLongScalarLongScalar(col 81:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 80:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 80:int) -> 81:boolean) -> 80:date
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Reduce Sink Vectorization:
- className: VectorReduceSinkOperator
+ File Output Operator
+ compressed: false
+ File Sink Vectorization:
+ className: VectorFileSinkOperator
native: false
- nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
- nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Execution mode: vectorized
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -910,25 +870,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 16
includeColumns: [1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14]
- dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2), l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
+ dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2)/DECIMAL_64, l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, bigint, bigint, bigint, bigint, double, double, bigint, bigint, double, double, double, string, bigint, decimal(10,2), bigint, decimal(10,2), bigint, decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint]
- Reduce Vectorization:
- enabled: false
- enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
- enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 101 Data size: 78500 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ scratchColumnTypeNames: [bigint, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, bigint, bigint, bigint, bigint, double, double, bigint, bigint, double, double, double, string, bigint, decimal(10,2), bigint, decimal(10,2), bigint, decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, decimal(10,2), decimal(10,2)]
Stage: Stage-0
Fetch Operator
@@ -975,7 +919,6 @@ PREHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
PREHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
@@ -1018,109 +961,108 @@ POSTHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
POSTHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
quantity quantity_description quantity_description_2 quantity_description_3 expected_date field_1 field_2 field_3 field_4 field_5 field_6 field_7 field_8 field_9 field_10 field_11 field_12
-NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
-1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
1 Single Single Single 1994-01-31 0.0 0.0 0.05 0.05 0.05 0.05 0.05 0.05 1994-01-28 00:00:00 NULL -36 2009-01-01
-2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
-2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
-3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
-3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
-3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
-4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
-4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
-5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
-5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
-5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
-6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
-6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
-7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
-8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
-8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
-9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
11 Many Many NULL 1994-03-22 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1994-03-27 00:00:00 NULL 10 2009-01-01
12 Many Many NULL 1996-05-12 12655.998 12655.998 0.03 0.03 0.03 0.03 0.03 0.03 1996-06-03 00:00:00 NULL 82 2009-01-01
12 Many Many NULL 1997-02-01 12156.034800000001 12156.034800000001 0.05 NULL 0.05 0.00 0.05 0.00 1997-02-22 00:00:00 NULL 1 2009-01-01
+13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
13 Many Many NULL 1994-03-08 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-03-26 00:00:00 NULL 41 2009-01-01
13 Many Many NULL 1998-10-28 17554.68 17554.68 0.07 NULL 0.07 0.00 0.07 0.00 1998-11-06 00:00:00 NULL 53 2009-01-01
-13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
14 Many Many NULL 1995-01-04 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1995-01-27 00:00:00 NULL 66 2009-01-01
15 Many Many NULL 1994-11-05 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-11-20 00:00:00 NULL 81 2009-12-31
-17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
17 Many Many NULL 1994-07-07 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-07-03 00:00:00 NULL -4 2009-01-01
+17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
19 Many Many NULL 1993-05-19 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1993-05-25 00:00:00 NULL 81 2009-01-01
19 Many Many NULL 1994-02-05 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1994-02-06 00:00:00 NULL -11 2009-01-01
+2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
+2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.0
<TRUNCATED>
[54/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 5a3f2c1..cb5e158 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -717,13 +717,10 @@ class Iface(fb303.FacebookService.Iface):
"""
pass
- def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+ def alter_partitions_with_environment_context(self, req):
"""
Parameters:
- - db_name
- - tbl_name
- - new_parts
- - environment_context
+ - req
"""
pass
@@ -4734,24 +4731,18 @@ class Client(fb303.FacebookService.Client, Iface):
raise result.o2
return
- def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+ def alter_partitions_with_environment_context(self, req):
"""
Parameters:
- - db_name
- - tbl_name
- - new_parts
- - environment_context
+ - req
"""
- self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
- self.recv_alter_partitions_with_environment_context()
+ self.send_alter_partitions_with_environment_context(req)
+ return self.recv_alter_partitions_with_environment_context()
- def send_alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+ def send_alter_partitions_with_environment_context(self, req):
self._oprot.writeMessageBegin('alter_partitions_with_environment_context', TMessageType.CALL, self._seqid)
args = alter_partitions_with_environment_context_args()
- args.db_name = db_name
- args.tbl_name = tbl_name
- args.new_parts = new_parts
- args.environment_context = environment_context
+ args.req = req
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
@@ -4767,11 +4758,13 @@ class Client(fb303.FacebookService.Client, Iface):
result = alter_partitions_with_environment_context_result()
result.read(iprot)
iprot.readMessageEnd()
+ if result.success is not None:
+ return result.success
if result.o1 is not None:
raise result.o1
if result.o2 is not None:
raise result.o2
- return
+ raise TApplicationException(TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result")
def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context):
"""
@@ -11366,7 +11359,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
iprot.readMessageEnd()
result = alter_partitions_with_environment_context_result()
try:
- self._handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context)
+ result.success = self._handler.alter_partitions_with_environment_context(args.req)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
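
Taken together, the Iface, Client and Processor hunks above turn alter_partitions_with_environment_context from a four-argument, void call into a request/response call: the client writes a single req field, the handler receives args.req, and the recv_ path now returns result.success or raises MISSING_RESULT when the server supplied neither a result nor a declared exception. A hedged sketch of how caller code changes against the regenerated Python client; the request struct's name and fields are not part of this diff, so its construction is left abstract:

    # Sketch only -- caller-side change with the regenerated client.  The
    # request struct is not defined in this hunk, so it is passed in opaquely.

    def alter_partitions_old(client, db_name, tbl_name, new_parts, env_ctx):
        # old generated signature: four positional arguments, no return value
        client.alter_partitions_with_environment_context(
            db_name, tbl_name, new_parts, env_ctx)

    def alter_partitions_new(client, req):
        # new generated signature: one request object; returns result.success
        # and raises TApplicationException(MISSING_RESULT) when no result
        # came back from the server
        return client.alter_partitions_with_environment_context(req)
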
@@ -15987,10 +15980,10 @@ class get_databases_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype819, _size816) = iprot.readListBegin()
- for _i820 in xrange(_size816):
- _elem821 = iprot.readString()
- self.success.append(_elem821)
+ (_etype826, _size823) = iprot.readListBegin()
+ for _i827 in xrange(_size823):
+ _elem828 = iprot.readString()
+ self.success.append(_elem828)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -16013,8 +16006,8 @@ class get_databases_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter822 in self.success:
- oprot.writeString(iter822)
+ for iter829 in self.success:
+ oprot.writeString(iter829)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
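
The remaining hunks in this file only renumber the compiler-generated temporaries (_etype/_size/_elem/_iter counters shift by 7) because new definitions were added earlier in the IDL; the serialization logic itself is unchanged. Stripped of the counters, each list-valued result field is read with the same loop, roughly (a sketch with stable names, not the generated code verbatim):

    # Equivalent of the generated read loop for a list<string> result field
    # (e.g. get_databases_result.success), with the numbered temporaries
    # replaced by stable names.
    def read_string_list(iprot):
        values = []
        _etype, _size = iprot.readListBegin()
        for _ in xrange(_size):   # xrange: the generated module targets Python 2
            values.append(iprot.readString())
        iprot.readListEnd()
        return values
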
@@ -16119,10 +16112,10 @@ class get_all_databases_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype826, _size823) = iprot.readListBegin()
- for _i827 in xrange(_size823):
- _elem828 = iprot.readString()
- self.success.append(_elem828)
+ (_etype833, _size830) = iprot.readListBegin()
+ for _i834 in xrange(_size830):
+ _elem835 = iprot.readString()
+ self.success.append(_elem835)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -16145,8 +16138,8 @@ class get_all_databases_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter829 in self.success:
- oprot.writeString(iter829)
+ for iter836 in self.success:
+ oprot.writeString(iter836)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -16916,12 +16909,12 @@ class get_type_all_result:
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype831, _vtype832, _size830 ) = iprot.readMapBegin()
- for _i834 in xrange(_size830):
- _key835 = iprot.readString()
- _val836 = Type()
- _val836.read(iprot)
- self.success[_key835] = _val836
+ (_ktype838, _vtype839, _size837 ) = iprot.readMapBegin()
+ for _i841 in xrange(_size837):
+ _key842 = iprot.readString()
+ _val843 = Type()
+ _val843.read(iprot)
+ self.success[_key842] = _val843
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -16944,9 +16937,9 @@ class get_type_all_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
- for kiter837,viter838 in self.success.items():
- oprot.writeString(kiter837)
- viter838.write(oprot)
+ for kiter844,viter845 in self.success.items():
+ oprot.writeString(kiter844)
+ viter845.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.o2 is not None:
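
get_type_all_result carries the same renumbering for its map<string,Type> field; with stable names the generated read loop amounts to the sketch below (Type here stands for the struct class already available to the generated module):

    # Map variant of the same pattern: one (key, value) pair per iteration,
    # values being Thrift structs that deserialize themselves via read().
    def read_type_map(iprot, Type):
        result = {}
        _ktype, _vtype, _size = iprot.readMapBegin()
        for _ in xrange(_size):
            key = iprot.readString()
            val = Type()
            val.read(iprot)
            result[key] = val
        iprot.readMapEnd()
        return result
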
@@ -17089,11 +17082,11 @@ class get_fields_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype842, _size839) = iprot.readListBegin()
- for _i843 in xrange(_size839):
- _elem844 = FieldSchema()
- _elem844.read(iprot)
- self.success.append(_elem844)
+ (_etype849, _size846) = iprot.readListBegin()
+ for _i850 in xrange(_size846):
+ _elem851 = FieldSchema()
+ _elem851.read(iprot)
+ self.success.append(_elem851)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -17128,8 +17121,8 @@ class get_fields_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter845 in self.success:
- iter845.write(oprot)
+ for iter852 in self.success:
+ iter852.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -17296,11 +17289,11 @@ class get_fields_with_environment_context_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype849, _size846) = iprot.readListBegin()
- for _i850 in xrange(_size846):
- _elem851 = FieldSchema()
- _elem851.read(iprot)
- self.success.append(_elem851)
+ (_etype856, _size853) = iprot.readListBegin()
+ for _i857 in xrange(_size853):
+ _elem858 = FieldSchema()
+ _elem858.read(iprot)
+ self.success.append(_elem858)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -17335,8 +17328,8 @@ class get_fields_with_environment_context_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter852 in self.success:
- iter852.write(oprot)
+ for iter859 in self.success:
+ iter859.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -17489,11 +17482,11 @@ class get_schema_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype856, _size853) = iprot.readListBegin()
- for _i857 in xrange(_size853):
- _elem858 = FieldSchema()
- _elem858.read(iprot)
- self.success.append(_elem858)
+ (_etype863, _size860) = iprot.readListBegin()
+ for _i864 in xrange(_size860):
+ _elem865 = FieldSchema()
+ _elem865.read(iprot)
+ self.success.append(_elem865)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -17528,8 +17521,8 @@ class get_schema_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter859 in self.success:
- iter859.write(oprot)
+ for iter866 in self.success:
+ iter866.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -17696,11 +17689,11 @@ class get_schema_with_environment_context_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype863, _size860) = iprot.readListBegin()
- for _i864 in xrange(_size860):
- _elem865 = FieldSchema()
- _elem865.read(iprot)
- self.success.append(_elem865)
+ (_etype870, _size867) = iprot.readListBegin()
+ for _i871 in xrange(_size867):
+ _elem872 = FieldSchema()
+ _elem872.read(iprot)
+ self.success.append(_elem872)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -17735,8 +17728,8 @@ class get_schema_with_environment_context_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter866 in self.success:
- iter866.write(oprot)
+ for iter873 in self.success:
+ iter873.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -18189,66 +18182,66 @@ class create_table_with_constraints_args:
elif fid == 2:
if ftype == TType.LIST:
self.primaryKeys = []
- (_etype870, _size867) = iprot.readListBegin()
- for _i871 in xrange(_size867):
- _elem872 = SQLPrimaryKey()
- _elem872.read(iprot)
- self.primaryKeys.append(_elem872)
+ (_etype877, _size874) = iprot.readListBegin()
+ for _i878 in xrange(_size874):
+ _elem879 = SQLPrimaryKey()
+ _elem879.read(iprot)
+ self.primaryKeys.append(_elem879)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.foreignKeys = []
- (_etype876, _size873) = iprot.readListBegin()
- for _i877 in xrange(_size873):
- _elem878 = SQLForeignKey()
- _elem878.read(iprot)
- self.foreignKeys.append(_elem878)
+ (_etype883, _size880) = iprot.readListBegin()
+ for _i884 in xrange(_size880):
+ _elem885 = SQLForeignKey()
+ _elem885.read(iprot)
+ self.foreignKeys.append(_elem885)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.uniqueConstraints = []
- (_etype882, _size879) = iprot.readListBegin()
- for _i883 in xrange(_size879):
- _elem884 = SQLUniqueConstraint()
- _elem884.read(iprot)
- self.uniqueConstraints.append(_elem884)
+ (_etype889, _size886) = iprot.readListBegin()
+ for _i890 in xrange(_size886):
+ _elem891 = SQLUniqueConstraint()
+ _elem891.read(iprot)
+ self.uniqueConstraints.append(_elem891)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.notNullConstraints = []
- (_etype888, _size885) = iprot.readListBegin()
- for _i889 in xrange(_size885):
- _elem890 = SQLNotNullConstraint()
- _elem890.read(iprot)
- self.notNullConstraints.append(_elem890)
+ (_etype895, _size892) = iprot.readListBegin()
+ for _i896 in xrange(_size892):
+ _elem897 = SQLNotNullConstraint()
+ _elem897.read(iprot)
+ self.notNullConstraints.append(_elem897)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.defaultConstraints = []
- (_etype894, _size891) = iprot.readListBegin()
- for _i895 in xrange(_size891):
- _elem896 = SQLDefaultConstraint()
- _elem896.read(iprot)
- self.defaultConstraints.append(_elem896)
+ (_etype901, _size898) = iprot.readListBegin()
+ for _i902 in xrange(_size898):
+ _elem903 = SQLDefaultConstraint()
+ _elem903.read(iprot)
+ self.defaultConstraints.append(_elem903)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.checkConstraints = []
- (_etype900, _size897) = iprot.readListBegin()
- for _i901 in xrange(_size897):
- _elem902 = SQLCheckConstraint()
- _elem902.read(iprot)
- self.checkConstraints.append(_elem902)
+ (_etype907, _size904) = iprot.readListBegin()
+ for _i908 in xrange(_size904):
+ _elem909 = SQLCheckConstraint()
+ _elem909.read(iprot)
+ self.checkConstraints.append(_elem909)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -18269,43 +18262,43 @@ class create_table_with_constraints_args:
if self.primaryKeys is not None:
oprot.writeFieldBegin('primaryKeys', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys))
- for iter903 in self.primaryKeys:
- iter903.write(oprot)
+ for iter910 in self.primaryKeys:
+ iter910.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.foreignKeys is not None:
oprot.writeFieldBegin('foreignKeys', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys))
- for iter904 in self.foreignKeys:
- iter904.write(oprot)
+ for iter911 in self.foreignKeys:
+ iter911.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.uniqueConstraints is not None:
oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints))
- for iter905 in self.uniqueConstraints:
- iter905.write(oprot)
+ for iter912 in self.uniqueConstraints:
+ iter912.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.notNullConstraints is not None:
oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints))
- for iter906 in self.notNullConstraints:
- iter906.write(oprot)
+ for iter913 in self.notNullConstraints:
+ iter913.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.defaultConstraints is not None:
oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints))
- for iter907 in self.defaultConstraints:
- iter907.write(oprot)
+ for iter914 in self.defaultConstraints:
+ iter914.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.checkConstraints is not None:
oprot.writeFieldBegin('checkConstraints', TType.LIST, 7)
oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints))
- for iter908 in self.checkConstraints:
- iter908.write(oprot)
+ for iter915 in self.checkConstraints:
+ iter915.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -19865,10 +19858,10 @@ class truncate_table_args:
elif fid == 3:
if ftype == TType.LIST:
self.partNames = []
- (_etype912, _size909) = iprot.readListBegin()
- for _i913 in xrange(_size909):
- _elem914 = iprot.readString()
- self.partNames.append(_elem914)
+ (_etype919, _size916) = iprot.readListBegin()
+ for _i920 in xrange(_size916):
+ _elem921 = iprot.readString()
+ self.partNames.append(_elem921)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -19893,8 +19886,8 @@ class truncate_table_args:
if self.partNames is not None:
oprot.writeFieldBegin('partNames', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.partNames))
- for iter915 in self.partNames:
- oprot.writeString(iter915)
+ for iter922 in self.partNames:
+ oprot.writeString(iter922)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
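
truncate_table_args takes the partition names as a plain list of strings at field id 3. A minimal call sketch, assuming the usual (dbName, tableName, partNames) order for the first two fields, which are not visible in this hunk:

    # partNames are partition-spec strings; db and table argument order is an assumption.
    client.truncate_table('default', 'web_logs', ['ds=2018-06-18', 'ds=2018-06-19'])
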
@@ -20094,10 +20087,10 @@ class get_tables_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype919, _size916) = iprot.readListBegin()
- for _i920 in xrange(_size916):
- _elem921 = iprot.readString()
- self.success.append(_elem921)
+ (_etype926, _size923) = iprot.readListBegin()
+ for _i927 in xrange(_size923):
+ _elem928 = iprot.readString()
+ self.success.append(_elem928)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -20120,8 +20113,8 @@ class get_tables_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter922 in self.success:
- oprot.writeString(iter922)
+ for iter929 in self.success:
+ oprot.writeString(iter929)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -20271,10 +20264,10 @@ class get_tables_by_type_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype926, _size923) = iprot.readListBegin()
- for _i927 in xrange(_size923):
- _elem928 = iprot.readString()
- self.success.append(_elem928)
+ (_etype933, _size930) = iprot.readListBegin()
+ for _i934 in xrange(_size930):
+ _elem935 = iprot.readString()
+ self.success.append(_elem935)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -20297,8 +20290,8 @@ class get_tables_by_type_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter929 in self.success:
- oprot.writeString(iter929)
+ for iter936 in self.success:
+ oprot.writeString(iter936)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -20422,10 +20415,10 @@ class get_materialized_views_for_rewriting_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype933, _size930) = iprot.readListBegin()
- for _i934 in xrange(_size930):
- _elem935 = iprot.readString()
- self.success.append(_elem935)
+ (_etype940, _size937) = iprot.readListBegin()
+ for _i941 in xrange(_size937):
+ _elem942 = iprot.readString()
+ self.success.append(_elem942)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -20448,8 +20441,8 @@ class get_materialized_views_for_rewriting_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter936 in self.success:
- oprot.writeString(iter936)
+ for iter943 in self.success:
+ oprot.writeString(iter943)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -20522,10 +20515,10 @@ class get_table_meta_args:
elif fid == 3:
if ftype == TType.LIST:
self.tbl_types = []
- (_etype940, _size937) = iprot.readListBegin()
- for _i941 in xrange(_size937):
- _elem942 = iprot.readString()
- self.tbl_types.append(_elem942)
+ (_etype947, _size944) = iprot.readListBegin()
+ for _i948 in xrange(_size944):
+ _elem949 = iprot.readString()
+ self.tbl_types.append(_elem949)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -20550,8 +20543,8 @@ class get_table_meta_args:
if self.tbl_types is not None:
oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.tbl_types))
- for iter943 in self.tbl_types:
- oprot.writeString(iter943)
+ for iter950 in self.tbl_types:
+ oprot.writeString(iter950)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -20607,11 +20600,11 @@ class get_table_meta_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype947, _size944) = iprot.readListBegin()
- for _i948 in xrange(_size944):
- _elem949 = TableMeta()
- _elem949.read(iprot)
- self.success.append(_elem949)
+ (_etype954, _size951) = iprot.readListBegin()
+ for _i955 in xrange(_size951):
+ _elem956 = TableMeta()
+ _elem956.read(iprot)
+ self.success.append(_elem956)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -20634,8 +20627,8 @@ class get_table_meta_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter950 in self.success:
- iter950.write(oprot)
+ for iter957 in self.success:
+ iter957.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -20759,10 +20752,10 @@ class get_all_tables_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype954, _size951) = iprot.readListBegin()
- for _i955 in xrange(_size951):
- _elem956 = iprot.readString()
- self.success.append(_elem956)
+ (_etype961, _size958) = iprot.readListBegin()
+ for _i962 in xrange(_size958):
+ _elem963 = iprot.readString()
+ self.success.append(_elem963)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -20785,8 +20778,8 @@ class get_all_tables_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter957 in self.success:
- oprot.writeString(iter957)
+ for iter964 in self.success:
+ oprot.writeString(iter964)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -21022,10 +21015,10 @@ class get_table_objects_by_name_args:
elif fid == 2:
if ftype == TType.LIST:
self.tbl_names = []
- (_etype961, _size958) = iprot.readListBegin()
- for _i962 in xrange(_size958):
- _elem963 = iprot.readString()
- self.tbl_names.append(_elem963)
+ (_etype968, _size965) = iprot.readListBegin()
+ for _i969 in xrange(_size965):
+ _elem970 = iprot.readString()
+ self.tbl_names.append(_elem970)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -21046,8 +21039,8 @@ class get_table_objects_by_name_args:
if self.tbl_names is not None:
oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.tbl_names))
- for iter964 in self.tbl_names:
- oprot.writeString(iter964)
+ for iter971 in self.tbl_names:
+ oprot.writeString(iter971)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -21099,11 +21092,11 @@ class get_table_objects_by_name_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype968, _size965) = iprot.readListBegin()
- for _i969 in xrange(_size965):
- _elem970 = Table()
- _elem970.read(iprot)
- self.success.append(_elem970)
+ (_etype975, _size972) = iprot.readListBegin()
+ for _i976 in xrange(_size972):
+ _elem977 = Table()
+ _elem977.read(iprot)
+ self.success.append(_elem977)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -21120,8 +21113,8 @@ class get_table_objects_by_name_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter971 in self.success:
- iter971.write(oprot)
+ for iter978 in self.success:
+ iter978.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -21513,10 +21506,10 @@ class get_materialization_invalidation_info_args:
elif fid == 2:
if ftype == TType.LIST:
self.tbl_names = []
- (_etype975, _size972) = iprot.readListBegin()
- for _i976 in xrange(_size972):
- _elem977 = iprot.readString()
- self.tbl_names.append(_elem977)
+ (_etype982, _size979) = iprot.readListBegin()
+ for _i983 in xrange(_size979):
+ _elem984 = iprot.readString()
+ self.tbl_names.append(_elem984)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -21537,8 +21530,8 @@ class get_materialization_invalidation_info_args:
if self.tbl_names is not None:
oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.tbl_names))
- for iter978 in self.tbl_names:
- oprot.writeString(iter978)
+ for iter985 in self.tbl_names:
+ oprot.writeString(iter985)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -21599,12 +21592,12 @@ class get_materialization_invalidation_info_result:
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype980, _vtype981, _size979 ) = iprot.readMapBegin()
- for _i983 in xrange(_size979):
- _key984 = iprot.readString()
- _val985 = Materialization()
- _val985.read(iprot)
- self.success[_key984] = _val985
+ (_ktype987, _vtype988, _size986 ) = iprot.readMapBegin()
+ for _i990 in xrange(_size986):
+ _key991 = iprot.readString()
+ _val992 = Materialization()
+ _val992.read(iprot)
+ self.success[_key991] = _val992
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -21639,9 +21632,9 @@ class get_materialization_invalidation_info_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
- for kiter986,viter987 in self.success.items():
- oprot.writeString(kiter986)
- viter987.write(oprot)
+ for kiter993,viter994 in self.success.items():
+ oprot.writeString(kiter993)
+ viter994.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -22006,10 +21999,10 @@ class get_table_names_by_filter_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype991, _size988) = iprot.readListBegin()
- for _i992 in xrange(_size988):
- _elem993 = iprot.readString()
- self.success.append(_elem993)
+ (_etype998, _size995) = iprot.readListBegin()
+ for _i999 in xrange(_size995):
+ _elem1000 = iprot.readString()
+ self.success.append(_elem1000)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -22044,8 +22037,8 @@ class get_table_names_by_filter_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter994 in self.success:
- oprot.writeString(iter994)
+ for iter1001 in self.success:
+ oprot.writeString(iter1001)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -23015,11 +23008,11 @@ class add_partitions_args:
if fid == 1:
if ftype == TType.LIST:
self.new_parts = []
- (_etype998, _size995) = iprot.readListBegin()
- for _i999 in xrange(_size995):
- _elem1000 = Partition()
- _elem1000.read(iprot)
- self.new_parts.append(_elem1000)
+ (_etype1005, _size1002) = iprot.readListBegin()
+ for _i1006 in xrange(_size1002):
+ _elem1007 = Partition()
+ _elem1007.read(iprot)
+ self.new_parts.append(_elem1007)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -23036,8 +23029,8 @@ class add_partitions_args:
if self.new_parts is not None:
oprot.writeFieldBegin('new_parts', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
- for iter1001 in self.new_parts:
- iter1001.write(oprot)
+ for iter1008 in self.new_parts:
+ iter1008.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -23195,11 +23188,11 @@ class add_partitions_pspec_args:
if fid == 1:
if ftype == TType.LIST:
self.new_parts = []
- (_etype1005, _size1002) = iprot.readListBegin()
- for _i1006 in xrange(_size1002):
- _elem1007 = PartitionSpec()
- _elem1007.read(iprot)
- self.new_parts.append(_elem1007)
+ (_etype1012, _size1009) = iprot.readListBegin()
+ for _i1013 in xrange(_size1009):
+ _elem1014 = PartitionSpec()
+ _elem1014.read(iprot)
+ self.new_parts.append(_elem1014)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -23216,8 +23209,8 @@ class add_partitions_pspec_args:
if self.new_parts is not None:
oprot.writeFieldBegin('new_parts', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
- for iter1008 in self.new_parts:
- iter1008.write(oprot)
+ for iter1015 in self.new_parts:
+ iter1015.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -23391,10 +23384,10 @@ class append_partition_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1012, _size1009) = iprot.readListBegin()
- for _i1013 in xrange(_size1009):
- _elem1014 = iprot.readString()
- self.part_vals.append(_elem1014)
+ (_etype1019, _size1016) = iprot.readListBegin()
+ for _i1020 in xrange(_size1016):
+ _elem1021 = iprot.readString()
+ self.part_vals.append(_elem1021)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -23419,8 +23412,8 @@ class append_partition_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1015 in self.part_vals:
- oprot.writeString(iter1015)
+ for iter1022 in self.part_vals:
+ oprot.writeString(iter1022)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -23773,10 +23766,10 @@ class append_partition_with_environment_context_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1019, _size1016) = iprot.readListBegin()
- for _i1020 in xrange(_size1016):
- _elem1021 = iprot.readString()
- self.part_vals.append(_elem1021)
+ (_etype1026, _size1023) = iprot.readListBegin()
+ for _i1027 in xrange(_size1023):
+ _elem1028 = iprot.readString()
+ self.part_vals.append(_elem1028)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -23807,8 +23800,8 @@ class append_partition_with_environment_context_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1022 in self.part_vals:
- oprot.writeString(iter1022)
+ for iter1029 in self.part_vals:
+ oprot.writeString(iter1029)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.environment_context is not None:
@@ -24403,10 +24396,10 @@ class drop_partition_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1026, _size1023) = iprot.readListBegin()
- for _i1027 in xrange(_size1023):
- _elem1028 = iprot.readString()
- self.part_vals.append(_elem1028)
+ (_etype1033, _size1030) = iprot.readListBegin()
+ for _i1034 in xrange(_size1030):
+ _elem1035 = iprot.readString()
+ self.part_vals.append(_elem1035)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -24436,8 +24429,8 @@ class drop_partition_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1029 in self.part_vals:
- oprot.writeString(iter1029)
+ for iter1036 in self.part_vals:
+ oprot.writeString(iter1036)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.deleteData is not None:
@@ -24610,10 +24603,10 @@ class drop_partition_with_environment_context_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1033, _size1030) = iprot.readListBegin()
- for _i1034 in xrange(_size1030):
- _elem1035 = iprot.readString()
- self.part_vals.append(_elem1035)
+ (_etype1040, _size1037) = iprot.readListBegin()
+ for _i1041 in xrange(_size1037):
+ _elem1042 = iprot.readString()
+ self.part_vals.append(_elem1042)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -24649,8 +24642,8 @@ class drop_partition_with_environment_context_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1036 in self.part_vals:
- oprot.writeString(iter1036)
+ for iter1043 in self.part_vals:
+ oprot.writeString(iter1043)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.deleteData is not None:
@@ -25387,10 +25380,10 @@ class get_partition_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1040, _size1037) = iprot.readListBegin()
- for _i1041 in xrange(_size1037):
- _elem1042 = iprot.readString()
- self.part_vals.append(_elem1042)
+ (_etype1047, _size1044) = iprot.readListBegin()
+ for _i1048 in xrange(_size1044):
+ _elem1049 = iprot.readString()
+ self.part_vals.append(_elem1049)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -25415,8 +25408,8 @@ class get_partition_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1043 in self.part_vals:
- oprot.writeString(iter1043)
+ for iter1050 in self.part_vals:
+ oprot.writeString(iter1050)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -25575,11 +25568,11 @@ class exchange_partition_args:
if fid == 1:
if ftype == TType.MAP:
self.partitionSpecs = {}
- (_ktype1045, _vtype1046, _size1044 ) = iprot.readMapBegin()
- for _i1048 in xrange(_size1044):
- _key1049 = iprot.readString()
- _val1050 = iprot.readString()
- self.partitionSpecs[_key1049] = _val1050
+ (_ktype1052, _vtype1053, _size1051 ) = iprot.readMapBegin()
+ for _i1055 in xrange(_size1051):
+ _key1056 = iprot.readString()
+ _val1057 = iprot.readString()
+ self.partitionSpecs[_key1056] = _val1057
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -25616,9 +25609,9 @@ class exchange_partition_args:
if self.partitionSpecs is not None:
oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
- for kiter1051,viter1052 in self.partitionSpecs.items():
- oprot.writeString(kiter1051)
- oprot.writeString(viter1052)
+ for kiter1058,viter1059 in self.partitionSpecs.items():
+ oprot.writeString(kiter1058)
+ oprot.writeString(viter1059)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.source_db is not None:
@@ -25823,11 +25816,11 @@ class exchange_partitions_args:
if fid == 1:
if ftype == TType.MAP:
self.partitionSpecs = {}
- (_ktype1054, _vtype1055, _size1053 ) = iprot.readMapBegin()
- for _i1057 in xrange(_size1053):
- _key1058 = iprot.readString()
- _val1059 = iprot.readString()
- self.partitionSpecs[_key1058] = _val1059
+ (_ktype1061, _vtype1062, _size1060 ) = iprot.readMapBegin()
+ for _i1064 in xrange(_size1060):
+ _key1065 = iprot.readString()
+ _val1066 = iprot.readString()
+ self.partitionSpecs[_key1065] = _val1066
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -25864,9 +25857,9 @@ class exchange_partitions_args:
if self.partitionSpecs is not None:
oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
- for kiter1060,viter1061 in self.partitionSpecs.items():
- oprot.writeString(kiter1060)
- oprot.writeString(viter1061)
+ for kiter1067,viter1068 in self.partitionSpecs.items():
+ oprot.writeString(kiter1067)
+ oprot.writeString(viter1068)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.source_db is not None:
@@ -25949,11 +25942,11 @@ class exchange_partitions_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1065, _size1062) = iprot.readListBegin()
- for _i1066 in xrange(_size1062):
- _elem1067 = Partition()
- _elem1067.read(iprot)
- self.success.append(_elem1067)
+ (_etype1072, _size1069) = iprot.readListBegin()
+ for _i1073 in xrange(_size1069):
+ _elem1074 = Partition()
+ _elem1074.read(iprot)
+ self.success.append(_elem1074)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -25994,8 +25987,8 @@ class exchange_partitions_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1068 in self.success:
- iter1068.write(oprot)
+ for iter1075 in self.success:
+ iter1075.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -26089,10 +26082,10 @@ class get_partition_with_auth_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1072, _size1069) = iprot.readListBegin()
- for _i1073 in xrange(_size1069):
- _elem1074 = iprot.readString()
- self.part_vals.append(_elem1074)
+ (_etype1079, _size1076) = iprot.readListBegin()
+ for _i1080 in xrange(_size1076):
+ _elem1081 = iprot.readString()
+ self.part_vals.append(_elem1081)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -26104,10 +26097,10 @@ class get_partition_with_auth_args:
elif fid == 5:
if ftype == TType.LIST:
self.group_names = []
- (_etype1078, _size1075) = iprot.readListBegin()
- for _i1079 in xrange(_size1075):
- _elem1080 = iprot.readString()
- self.group_names.append(_elem1080)
+ (_etype1085, _size1082) = iprot.readListBegin()
+ for _i1086 in xrange(_size1082):
+ _elem1087 = iprot.readString()
+ self.group_names.append(_elem1087)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -26132,8 +26125,8 @@ class get_partition_with_auth_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1081 in self.part_vals:
- oprot.writeString(iter1081)
+ for iter1088 in self.part_vals:
+ oprot.writeString(iter1088)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.user_name is not None:
@@ -26143,8 +26136,8 @@ class get_partition_with_auth_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter1082 in self.group_names:
- oprot.writeString(iter1082)
+ for iter1089 in self.group_names:
+ oprot.writeString(iter1089)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -26573,11 +26566,11 @@ class get_partitions_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1086, _size1083) = iprot.readListBegin()
- for _i1087 in xrange(_size1083):
- _elem1088 = Partition()
- _elem1088.read(iprot)
- self.success.append(_elem1088)
+ (_etype1093, _size1090) = iprot.readListBegin()
+ for _i1094 in xrange(_size1090):
+ _elem1095 = Partition()
+ _elem1095.read(iprot)
+ self.success.append(_elem1095)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -26606,8 +26599,8 @@ class get_partitions_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1089 in self.success:
- iter1089.write(oprot)
+ for iter1096 in self.success:
+ iter1096.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -26701,10 +26694,10 @@ class get_partitions_with_auth_args:
elif fid == 5:
if ftype == TType.LIST:
self.group_names = []
- (_etype1093, _size1090) = iprot.readListBegin()
- for _i1094 in xrange(_size1090):
- _elem1095 = iprot.readString()
- self.group_names.append(_elem1095)
+ (_etype1100, _size1097) = iprot.readListBegin()
+ for _i1101 in xrange(_size1097):
+ _elem1102 = iprot.readString()
+ self.group_names.append(_elem1102)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -26737,8 +26730,8 @@ class get_partitions_with_auth_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter1096 in self.group_names:
- oprot.writeString(iter1096)
+ for iter1103 in self.group_names:
+ oprot.writeString(iter1103)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -26799,11 +26792,11 @@ class get_partitions_with_auth_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1100, _size1097) = iprot.readListBegin()
- for _i1101 in xrange(_size1097):
- _elem1102 = Partition()
- _elem1102.read(iprot)
- self.success.append(_elem1102)
+ (_etype1107, _size1104) = iprot.readListBegin()
+ for _i1108 in xrange(_size1104):
+ _elem1109 = Partition()
+ _elem1109.read(iprot)
+ self.success.append(_elem1109)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -26832,8 +26825,8 @@ class get_partitions_with_auth_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1103 in self.success:
- iter1103.write(oprot)
+ for iter1110 in self.success:
+ iter1110.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -26991,11 +26984,11 @@ class get_partitions_pspec_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1107, _size1104) = iprot.readListBegin()
- for _i1108 in xrange(_size1104):
- _elem1109 = PartitionSpec()
- _elem1109.read(iprot)
- self.success.append(_elem1109)
+ (_etype1114, _size1111) = iprot.readListBegin()
+ for _i1115 in xrange(_size1111):
+ _elem1116 = PartitionSpec()
+ _elem1116.read(iprot)
+ self.success.append(_elem1116)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27024,8 +27017,8 @@ class get_partitions_pspec_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1110 in self.success:
- iter1110.write(oprot)
+ for iter1117 in self.success:
+ iter1117.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -27183,10 +27176,10 @@ class get_partition_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1114, _size1111) = iprot.readListBegin()
- for _i1115 in xrange(_size1111):
- _elem1116 = iprot.readString()
- self.success.append(_elem1116)
+ (_etype1121, _size1118) = iprot.readListBegin()
+ for _i1122 in xrange(_size1118):
+ _elem1123 = iprot.readString()
+ self.success.append(_elem1123)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27215,8 +27208,8 @@ class get_partition_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter1117 in self.success:
- oprot.writeString(iter1117)
+ for iter1124 in self.success:
+ oprot.writeString(iter1124)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -27456,10 +27449,10 @@ class get_partitions_ps_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1121, _size1118) = iprot.readListBegin()
- for _i1122 in xrange(_size1118):
- _elem1123 = iprot.readString()
- self.part_vals.append(_elem1123)
+ (_etype1128, _size1125) = iprot.readListBegin()
+ for _i1129 in xrange(_size1125):
+ _elem1130 = iprot.readString()
+ self.part_vals.append(_elem1130)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27489,8 +27482,8 @@ class get_partitions_ps_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1124 in self.part_vals:
- oprot.writeString(iter1124)
+ for iter1131 in self.part_vals:
+ oprot.writeString(iter1131)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.max_parts is not None:
@@ -27554,11 +27547,11 @@ class get_partitions_ps_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1128, _size1125) = iprot.readListBegin()
- for _i1129 in xrange(_size1125):
- _elem1130 = Partition()
- _elem1130.read(iprot)
- self.success.append(_elem1130)
+ (_etype1135, _size1132) = iprot.readListBegin()
+ for _i1136 in xrange(_size1132):
+ _elem1137 = Partition()
+ _elem1137.read(iprot)
+ self.success.append(_elem1137)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27587,8 +27580,8 @@ class get_partitions_ps_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1131 in self.success:
- iter1131.write(oprot)
+ for iter1138 in self.success:
+ iter1138.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -27675,10 +27668,10 @@ class get_partitions_ps_with_auth_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1135, _size1132) = iprot.readListBegin()
- for _i1136 in xrange(_size1132):
- _elem1137 = iprot.readString()
- self.part_vals.append(_elem1137)
+ (_etype1142, _size1139) = iprot.readListBegin()
+ for _i1143 in xrange(_size1139):
+ _elem1144 = iprot.readString()
+ self.part_vals.append(_elem1144)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27695,10 +27688,10 @@ class get_partitions_ps_with_auth_args:
elif fid == 6:
if ftype == TType.LIST:
self.group_names = []
- (_etype1141, _size1138) = iprot.readListBegin()
- for _i1142 in xrange(_size1138):
- _elem1143 = iprot.readString()
- self.group_names.append(_elem1143)
+ (_etype1148, _size1145) = iprot.readListBegin()
+ for _i1149 in xrange(_size1145):
+ _elem1150 = iprot.readString()
+ self.group_names.append(_elem1150)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27723,8 +27716,8 @@ class get_partitions_ps_with_auth_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1144 in self.part_vals:
- oprot.writeString(iter1144)
+ for iter1151 in self.part_vals:
+ oprot.writeString(iter1151)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.max_parts is not None:
@@ -27738,8 +27731,8 @@ class get_partitions_ps_with_auth_args:
if self.group_names is not None:
oprot.writeFieldBegin('group_names', TType.LIST, 6)
oprot.writeListBegin(TType.STRING, len(self.group_names))
- for iter1145 in self.group_names:
- oprot.writeString(iter1145)
+ for iter1152 in self.group_names:
+ oprot.writeString(iter1152)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -27801,11 +27794,11 @@ class get_partitions_ps_with_auth_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1149, _size1146) = iprot.readListBegin()
- for _i1150 in xrange(_size1146):
- _elem1151 = Partition()
- _elem1151.read(iprot)
- self.success.append(_elem1151)
+ (_etype1156, _size1153) = iprot.readListBegin()
+ for _i1157 in xrange(_size1153):
+ _elem1158 = Partition()
+ _elem1158.read(iprot)
+ self.success.append(_elem1158)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27834,8 +27827,8 @@ class get_partitions_ps_with_auth_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1152 in self.success:
- iter1152.write(oprot)
+ for iter1159 in self.success:
+ iter1159.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -27916,10 +27909,10 @@ class get_partition_names_ps_args:
elif fid == 3:
if ftype == TType.LIST:
self.part_vals = []
- (_etype1156, _size1153) = iprot.readListBegin()
- for _i1157 in xrange(_size1153):
- _elem1158 = iprot.readString()
- self.part_vals.append(_elem1158)
+ (_etype1163, _size1160) = iprot.readListBegin()
+ for _i1164 in xrange(_size1160):
+ _elem1165 = iprot.readString()
+ self.part_vals.append(_elem1165)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -27949,8 +27942,8 @@ class get_partition_names_ps_args:
if self.part_vals is not None:
oprot.writeFieldBegin('part_vals', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.part_vals))
- for iter1159 in self.part_vals:
- oprot.writeString(iter1159)
+ for iter1166 in self.part_vals:
+ oprot.writeString(iter1166)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.max_parts is not None:
@@ -28014,10 +28007,10 @@ class get_partition_names_ps_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1163, _size1160) = iprot.readListBegin()
- for _i1164 in xrange(_size1160):
- _elem1165 = iprot.readString()
- self.success.append(_elem1165)
+ (_etype1170, _size1167) = iprot.readListBegin()
+ for _i1171 in xrange(_size1167):
+ _elem1172 = iprot.readString()
+ self.success.append(_elem1172)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -28046,8 +28039,8 @@ class get_partition_names_ps_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter1166 in self.success:
- oprot.writeString(iter1166)
+ for iter1173 in self.success:
+ oprot.writeString(iter1173)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -28218,11 +28211,11 @@ class get_partitions_by_filter_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1170, _size1167) = iprot.readListBegin()
- for _i1171 in xrange(_size1167):
- _elem1172 = Partition()
- _elem1172.read(iprot)
- self.success.append(_elem1172)
+ (_etype1177, _size1174) = iprot.readListBegin()
+ for _i1178 in xrange(_size1174):
+ _elem1179 = Partition()
+ _elem1179.read(iprot)
+ self.success.append(_elem1179)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -28251,8 +28244,8 @@ class get_partitions_by_filter_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1173 in self.success:
- iter1173.write(oprot)
+ for iter1180 in self.success:
+ iter1180.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -28423,11 +28416,11 @@ class get_part_specs_by_filter_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1177, _size1174) = iprot.readListBegin()
- for _i1178 in xrange(_size1174):
- _elem1179 = PartitionSpec()
- _elem1179.read(iprot)
- self.success.append(_elem1179)
+ (_etype1184, _size1181) = iprot.readListBegin()
+ for _i1185 in xrange(_size1181):
+ _elem1186 = PartitionSpec()
+ _elem1186.read(iprot)
+ self.success.append(_elem1186)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -28456,8 +28449,8 @@ class get_part_specs_by_filter_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1180 in self.success:
- iter1180.write(oprot)
+ for iter1187 in self.success:
+ iter1187.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -28877,10 +28870,10 @@ class get_partitions_by_names_args:
elif fid == 3:
if ftype == TType.LIST:
self.names = []
- (_etype1184, _size1181) = iprot.readListBegin()
- for _i1185 in xrange(_size1181):
- _elem1186 = iprot.readString()
- self.names.append(_elem1186)
+ (_etype1191, _size1188) = iprot.readListBegin()
+ for _i1192 in xrange(_size1188):
+ _elem1193 = iprot.readString()
+ self.names.append(_elem1193)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -28905,8 +28898,8 @@ class get_partitions_by_names_args:
if self.names is not None:
oprot.writeFieldBegin('names', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.names))
- for iter1187 in self.names:
- oprot.writeString(iter1187)
+ for iter1194 in self.names:
+ oprot.writeString(iter1194)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -28965,11 +28958,11 @@ class get_partitions_by_names_result:
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype1191, _size1188) = iprot.readListBegin()
- for _i1192 in xrange(_size1188):
- _elem1193 = Partition()
- _elem1193.read(iprot)
- self.success.append(_elem1193)
+ (_etype1198, _size1195) = iprot.readListBegin()
+ for _i1199 in xrange(_size1195):
+ _elem1200 = Partition()
+ _elem1200.read(iprot)
+ self.success.append(_elem1200)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -28998,8 +28991,8 @@ class get_partitions_by_names_result:
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter1194 in self.success:
- iter1194.write(oprot)
+ for iter1201 in self.success:
+ iter1201.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.o1 is not None:
@@ -29249,11 +29242,11 @@ class alter_partitions_args:
elif fid == 3:
if ftype == TType.LIST:
self.new_parts = []
- (_etype1198, _size1195) = iprot.readListBegin()
- for _i1199 in xrange(_size1195):
- _elem1200 = Partition()
- _elem1200.read(iprot)
- self.new_parts.append(_elem1200)
+ (_etype1205, _size1202) = iprot.readListBegin()
+ for _i1206 in xrange(_size1202):
+ _elem1207 = Partition()
+ _elem1207.read(iprot)
+ self.new_parts.append(_elem1207)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -29278,8 +29271,8 @@ class alter_partitions_args:
if self.new_parts is not None:
oprot.writeFieldBegin('new_parts', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
- for iter1201 in self.new_parts:
- iter1201.write(oprot)
+ for iter1208 in self.new_parts:
+ iter1208.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
@@ -29390,25 +29383,16 @@ class alter_partitions_result:
class alter_partitions_with_environment_context_args:
"""
Attributes:
- - db_name
- - tbl_name
- - new_parts
- - environment_context
+ - req
"""
thrift_spec = (
None, # 0
- (1, TType.STRING, 'db_name', None, None, ), # 1
- (2, TType.STRING, 'tbl_name', None, None, ), # 2
- (3, TType.LIST, 'new_parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3
- (4, TType.STRUCT, 'environment_context', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4
+ (1, TType.STRUCT, 'req', (AlterPartitionsRequest, AlterPartitionsRequest.thrift_spec), None, ), # 1
)
- def __init__(self, db_name=None, tbl_name=None, new_parts=None, environment_context=None,):
- self.db_name = db_name
- self.tbl_name = tbl_name
- self.new_parts = new_parts
- self.environment_context = environment_context
+ def __init__(self, req=None,):
+ self.req = req
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -29420,30 +29404,9 @@ class alter_partitions_with_environment_context_args:
if ftype == TType.STOP:
break
if fid == 1:
- if ftype == TType.STRING:
- self.db_name = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.STRING:
- self.tbl_name = iprot.readString()
- else:
- iprot.skip(ftype)
- elif fid == 3:
- if ftype == TType.LIST:
- self.new_parts = []
- (_etype1205, _size1202) = iprot.readListBegin()
- for _i1206 in xrange(_size1202):
- _elem1207 = Partition()
- _elem1207.read(iprot)
- self.new_parts.append(_elem1207)
- iprot.readListEnd()
- else:
- iprot.skip(ftype)
- elif fid == 4:
if ftype == TType.STRUCT:
- self.environment_context = EnvironmentContext()
- self.environment_context.read(iprot)
+ self.req = AlterPartitionsRequest()
+ self.req.read(iprot)
else:
iprot.skip(ftype)
else:
@@ -29456,24 +29419,9 @@ class alter_partitions_with_environment_context_args:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('alter_partitions_with_environment_context_args')
- if self.db_name is not None:
- oprot.writeFieldBegin('db_name', TType.STRING, 1)
- oprot.writeString(self.db_name)
- oprot.writeFieldEnd()
- if self.tbl_name is not None:
- oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
- oprot.writeString(self.tbl_name)
- oprot.writeFieldEnd()
- if self.new_parts is not None:
- oprot.writeFieldBegin('new_parts', TType.LIST, 3)
- oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
- for iter1208 in self.new_parts:
- iter1208.write(oprot)
- oprot.writeListEnd()
- oprot.writeFieldEnd()
- if self.environment_context is not None:
- oprot.writeFieldBegin('environment_context', TType.STRUCT, 4)
- self.environment_context.write(oprot)
+ if self.req is not None:
+ oprot.writeFieldBegin('req', TType.STRUCT, 1)
+ self.req.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -29484,10 +29432,7 @@ class alter_partitions_with_environment_context_args:
def __hash__(self):
value = 17
- value = (value * 31) ^ hash(self.db_name)
- value = (value * 31) ^ hash(self.tbl_name)
- value = (value * 31) ^ hash(self.new_parts)
- value = (value * 31) ^ hash(self.environment_context)
+ value = (value * 31) ^ hash(self.req)
return value
def __repr__(self):
@@ -29504,17 +29449,19 @@ class alter_partitions_with_environment_context_args:
class alter_partitions_with_environment_context_result:
"""
Attributes:
+ - success
- o1
- o2
"""
thrift_spec = (
- None, # 0
+ (0, TType.STRUCT, 'success', (AlterPartitionsResponse, AlterPartitionsResponse.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'o1', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2
)
- def __init__(self, o1=None, o2=None,):
+ def __init__(self, success=None, o1=None, o2=None,):
+ self.success = success
self.o1 = o1
self.o2 = o2
@@ -29527,7 +29474,13 @@ class alter_partitions_with_environment_context_result:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
- if fid == 1:
+ if fid == 0:
+ if ftype == TType.STRUCT:
+ self.success = AlterPartitionsResponse()
+ self.success.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
if ftype == TType.STRUCT:
self.o1 = InvalidOperationException()
self.o1.read(iprot)
@@ -29549,6 +29502,10 @@ class alter_partitions_with_environment_context_result:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('alter_partitions_with_environment_context_result')
+ if self.success is not None:
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
+ oprot.writeFieldEnd()
if self.o1 is not None:
oprot.writeFieldBegin('o1', TType.STRUCT, 1)
self.o1.write(oprot)
@@ -29566,6 +29523,7 @@ class alter_partitions_with_environment_context_result:
def __hash__(self):
value = 17
+ value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.o1)
value = (value * 31) ^ hash(self.o2)
return value
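
The substantive change in this last block is the signature of alter_partitions_with_environment_context: the four explicit fields (db_name, tbl_name, new_parts, environment_context) collapse into a single AlterPartitionsRequest, and the result gains a success field typed AlterPartitionsResponse. A hedged sketch of what a caller of the regenerated Python client would now do; the request's field names are an assumption, since only the wrapper struct's name appears in this hunk.

    # Assumes `client` is a connected ThriftHiveMetastore.Client and that
    # AlterPartitionsRequest mirrors the old arguments as fields (assumption).
    req = AlterPartitionsRequest()
    req.dbName = 'default'                     # field name assumed
    req.tableName = 'web_logs'                 # field name assumed
    req.partitions = new_parts                 # list of Partition objects
    req.environmentContext = EnvironmentContext()
    resp = client.alter_partitions_with_environment_context(req)
    # resp is an AlterPartitionsResponse; before this change the call returned None on success.
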
[58/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 38d4f64..d9f17cc 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -56,6 +56,9 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17);
private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)18);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)19);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)20);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)21);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -81,6 +84,9 @@ import org.slf4j.LoggerFactory;
private CreationMetadata creationMetadata; // optional
private String catName; // optional
private PrincipalType ownerType; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -105,7 +111,14 @@ import org.slf4j.LoggerFactory;
*
* @see PrincipalType
*/
- OWNER_TYPE((short)18, "ownerType");
+ OWNER_TYPE((short)18, "ownerType"),
+ TXN_ID((short)19, "txnId"),
+ VALID_WRITE_ID_LIST((short)20, "validWriteIdList"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)21, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -156,6 +169,12 @@ import org.slf4j.LoggerFactory;
return CAT_NAME;
case 18: // OWNER_TYPE
return OWNER_TYPE;
+ case 19: // TXN_ID
+ return TXN_ID;
+ case 20: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
+ case 21: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -201,8 +220,9 @@ import org.slf4j.LoggerFactory;
private static final int __RETENTION_ISSET_ID = 2;
private static final int __TEMPORARY_ISSET_ID = 3;
private static final int __REWRITEENABLED_ISSET_ID = 4;
+ private static final int __TXNID_ISSET_ID = 5;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE};
+ private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -245,6 +265,12 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
}
@@ -254,6 +280,8 @@ import org.slf4j.LoggerFactory;
this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER;
+ this.txnId = -1L;
+
}
public Table(
@@ -342,6 +370,13 @@ import org.slf4j.LoggerFactory;
if (other.isSetOwnerType()) {
this.ownerType = other.ownerType;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public Table deepCopy() {
@@ -374,6 +409,10 @@ import org.slf4j.LoggerFactory;
this.catName = null;
this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
+ this.isStatsCompliant = null;
}
public String getTableName() {
@@ -819,6 +858,82 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE_NAME:
@@ -965,6 +1080,30 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -1024,6 +1163,15 @@ import org.slf4j.LoggerFactory;
case OWNER_TYPE:
return getOwnerType();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -1071,6 +1219,12 @@ import org.slf4j.LoggerFactory;
return isSetCatName();
case OWNER_TYPE:
return isSetOwnerType();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
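
HIVE-19532 adds three optional fields to Table: txnId (i64, defaulting to -1), validWriteIdList (string), and isStatsCompliant (an IsolationLevelCompliance enum). Since the field names come from the shared IDL, the regenerated Python bindings should expose them under the same names; a hedged sketch assuming the classic get_table/alter_table calls, with purely illustrative values:

    # Assumption: Table in the regenerated Python ttypes carries the same three
    # optional fields; values below are illustrative only.
    tbl = client.get_table('default', 'web_logs')
    tbl.txnId = 1234                               # optional i64, -1 when unset
    tbl.validWriteIdList = 'default.web_logs:5::'  # illustrative write-id-list string
    # isStatsCompliant is an IsolationLevelCompliance enum; its members are not shown in this hunk
    client.alter_table('default', 'web_logs', tbl)
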
@@ -1250,6 +1404,33 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -1347,6 +1528,21 @@ import org.slf4j.LoggerFactory;
if (present_ownerType)
list.add(ownerType.getValue());
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -1538,6 +1734,36 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1693,6 +1919,32 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1914,6 +2166,30 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 19: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 20: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 21: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -2034,6 +2310,25 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -2106,7 +2401,16 @@ import org.slf4j.LoggerFactory;
if (struct.isSetOwnerType()) {
optionals.set(17);
}
- oprot.writeBitSet(optionals, 18);
+ if (struct.isSetTxnId()) {
+ optionals.set(18);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(19);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(20);
+ }
+ oprot.writeBitSet(optionals, 21);
if (struct.isSetTableName()) {
oprot.writeString(struct.tableName);
}
@@ -2174,12 +2478,21 @@ import org.slf4j.LoggerFactory;
if (struct.isSetOwnerType()) {
oprot.writeI32(struct.ownerType.getValue());
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(18);
+ BitSet incoming = iprot.readBitSet(21);
if (incoming.get(0)) {
struct.tableName = iprot.readString();
struct.setTableNameIsSet(true);
@@ -2276,6 +2589,18 @@ import org.slf4j.LoggerFactory;
struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
struct.setOwnerTypeIsSet(true);
}
+ if (incoming.get(18)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(19)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
+ if (incoming.get(20)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
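
For reference, a minimal hypothetical sketch of how a metastore client might populate the three optional Table fields introduced in the hunks above (txnId, validWriteIdList, isStatsCompliant). The setter, isSet*, and findByValue calls are taken from the generated code in this diff; the wrapper class and the example values are assumptions, not part of the patch.

import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;
import org.apache.hadoop.hive.metastore.api.Table;

public class TableTxnFieldsSketch {
  public static void main(String[] args) {
    Table table = new Table(); // other required Table fields omitted for brevity
    // Primitive optional: setTxnId flips the __isset bit, so the field gets serialized.
    table.setTxnId(42L);
    // Object optionals count as "set" simply by being non-null.
    table.setValidWriteIdList("db.tbl:10:10::"); // placeholder write-id list string
    // Assumed: 1 maps to a valid IsolationLevelCompliance constant; guard against null.
    IsolationLevelCompliance compliance = IsolationLevelCompliance.findByValue(1);
    if (compliance != null) {
      table.setIsStatsCompliant(compliance);
    }

    // Mirrors the generated isSet* logic: bitfield test for txnId, null checks otherwise.
    System.out.println(table.isSetTxnId());            // true
    System.out.println(table.isSetValidWriteIdList()); // true
  }
}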
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
index a663a64..c9b70a4 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
@@ -42,6 +42,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -53,13 +55,17 @@ import org.slf4j.LoggerFactory;
private String tblName; // required
private List<String> colNames; // required
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
DB_NAME((short)1, "dbName"),
TBL_NAME((short)2, "tblName"),
COL_NAMES((short)3, "colNames"),
- CAT_NAME((short)4, "catName");
+ CAT_NAME((short)4, "catName"),
+ TXN_ID((short)5, "txnId"),
+ VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -82,6 +88,10 @@ import org.slf4j.LoggerFactory;
return COL_NAMES;
case 4: // CAT_NAME
return CAT_NAME;
+ case 5: // TXN_ID
+ return TXN_ID;
+ case 6: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -122,7 +132,9 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.CAT_NAME};
+ private static final int __TXNID_ISSET_ID = 0;
+ private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -135,11 +147,17 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap);
}
public TableStatsRequest() {
+ this.txnId = -1L;
+
}
public TableStatsRequest(
@@ -157,6 +175,7 @@ import org.slf4j.LoggerFactory;
* Performs a deep copy on <i>other</i>.
*/
public TableStatsRequest(TableStatsRequest other) {
+ __isset_bitfield = other.__isset_bitfield;
if (other.isSetDbName()) {
this.dbName = other.dbName;
}
@@ -170,6 +189,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public TableStatsRequest deepCopy() {
@@ -182,6 +205,9 @@ import org.slf4j.LoggerFactory;
this.tblName = null;
this.colNames = null;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -291,6 +317,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -325,6 +396,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -342,6 +429,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -361,6 +454,10 @@ import org.slf4j.LoggerFactory;
return isSetColNames();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -414,6 +511,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -441,6 +556,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -492,6 +617,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -545,6 +690,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -576,6 +737,8 @@ import org.slf4j.LoggerFactory;
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
+ // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+ __isset_bitfield = 0;
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
@@ -642,6 +805,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 5: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 6: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -684,6 +863,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -714,10 +905,22 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetTxnId()) {
+ optionals.set(1);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(2);
+ }
+ oprot.writeBitSet(optionals, 3);
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -738,11 +941,19 @@ import org.slf4j.LoggerFactory;
}
}
struct.setColNamesIsSet(true);
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(3);
if (incoming.get(0)) {
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(2)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
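
A small illustrative sketch of the default/unset behavior visible in the TableStatsRequest changes above: the no-arg constructor assigns txnId = -1L without touching the isset bitfield, so the field is still reported as unset and skipped by both write schemes until setTxnId is called. Method names come from this diff; the example values are made up.

import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

public class TableStatsRequestSketch {
  public static void main(String[] args) {
    TableStatsRequest req = new TableStatsRequest(); // required dbName/tblName/colNames omitted here
    System.out.println(req.getTxnId());   // -1, the constructor default
    System.out.println(req.isSetTxnId()); // false, the isset bit was never set

    req.setTxnId(100L);                      // placeholder transaction id
    req.setValidWriteIdList("db.tbl:5:5::"); // placeholder write-id list string
    System.out.println(req.isSetTxnId());            // true
    System.out.println(req.isSetValidWriteIdList()); // true
  }
}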
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
index dff7d5c..4864f68 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult");
private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private List<ColumnStatisticsObj> tableStats; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- TABLE_STATS((short)1, "tableStats");
+ TABLE_STATS((short)1, "tableStats"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // TABLE_STATS
return TABLE_STATS;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,12 +116,15 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.TABLE_STATS, new org.apache.thrift.meta_data.FieldMetaData("tableStats", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsResult.class, metaDataMap);
}
@@ -138,6 +150,9 @@ import org.slf4j.LoggerFactory;
}
this.tableStats = __this__tableStats;
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public TableStatsResult deepCopy() {
@@ -147,6 +162,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.tableStats = null;
+ this.isStatsCompliant = null;
}
public int getTableStatsSize() {
@@ -187,6 +203,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case TABLE_STATS:
@@ -197,6 +244,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -205,6 +260,9 @@ import org.slf4j.LoggerFactory;
case TABLE_STATS:
return getTableStats();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -218,6 +276,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case TABLE_STATS:
return isSetTableStats();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -244,6 +304,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -256,6 +325,11 @@ import org.slf4j.LoggerFactory;
if (present_tableStats)
list.add(tableStats);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -277,6 +351,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -304,6 +388,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.tableStats);
}
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -370,6 +464,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -395,6 +497,13 @@ import org.slf4j.LoggerFactory;
}
oprot.writeFieldEnd();
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -419,6 +528,14 @@ import org.slf4j.LoggerFactory;
_iter428.write(oprot);
}
}
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -436,6 +553,11 @@ import org.slf4j.LoggerFactory;
}
}
struct.setTableStatsIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
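
On the consumer side, a hypothetical sketch for the new optional isStatsCompliant field on TableStatsResult: since the field is optional and older servers never send it, callers should check isSetIsStatsCompliant() before relying on getIsStatsCompliant(). Both accessors appear in the generated code above; the helper method itself is only illustrative.

import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;
import org.apache.hadoop.hive.metastore.api.TableStatsResult;

public class TableStatsResultSketch {
  // Illustrative helper: treat "unset" distinctly from any reported enum value.
  static String describeCompliance(TableStatsResult result) {
    if (!result.isSetIsStatsCompliant()) {
      return "not reported by the metastore";
    }
    IsolationLevelCompliance c = result.getIsStatsCompliant();
    return "reported as " + c; // the enum's toString prints the constant name
  }
}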
[10/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
index 0b76bfb..95fa5ca 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a
-POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a
@@ -39,7 +39,7 @@ STAGE PLANS:
alias: orc_merge5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), subtype (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -51,7 +51,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), _col5 (type: double)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), _col5 (type: double)
outputColumnNames: userid, string1, subtype, decimal1, ts, st
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -65,7 +65,7 @@ STAGE PLANS:
sort order: +
Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Reduce Operator Tree:
Group By Operator
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4)
@@ -74,7 +74,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
+ expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -102,7 +102,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a
PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
@@ -116,22 +116,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -147,22 +147,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -178,22 +178,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -209,22 +209,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out
index c67e8d1..66daa07 100644
--- a/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out
@@ -122,8 +122,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -257,8 +257,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
index e9e24b1..5033c13 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n5
-POSTHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n5
-PREHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5b_n0
@@ -46,7 +46,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -80,7 +80,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -137,7 +137,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -208,7 +208,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -222,7 +222,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5b_n0
POSTHOOK: Output: default@orc_merge5b_n0
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 1051 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 1054 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: select * from orc_merge5b_n0
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_merge5b_n0
@@ -242,7 +242,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -313,7 +313,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5b_n0
POSTHOOK: Output: default@orc_merge5b_n0
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 1051 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 1054 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: select * from orc_merge5b_n0
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_merge5b_n0
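For context on the hunks above: a bare DECIMAL column in Hive is shorthand for DECIMAL(10,0), so redeclaring decimal1 as DECIMAL(38,0) is what flips every plan and lineage line from decimal(10,0) to decimal(38,0); the regenerated golden files also record slightly larger ORC files (1051 -> 1054 bytes here). A minimal sketch of the two declarations, assuming a throwaway table name (orc_merge5_demo is not part of the test suite):

-- Bare DECIMAL defaults to DECIMAL(10,0); the updated tests spell out DECIMAL(38,0).
CREATE TABLE orc_merge5_demo (
  userid   BIGINT,
  string1  STRING,
  subtype  DOUBLE,
  decimal1 DECIMAL(38,0),  -- previously written as plain DECIMAL, i.e. DECIMAL(10,0)
  ts       TIMESTAMP
) STORED AS ORC;

DESCRIBE orc_merge5_demo;  -- decimal1 should now report decimal(38,0)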
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
index 99624bc..e50ab30 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n4
-POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n4
-PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year string, hour int) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a_n1
-POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year string, hour int) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a_n1
@@ -46,7 +46,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -83,7 +83,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -96,7 +96,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -184,7 +184,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -258,7 +258,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -271,7 +271,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -297,9 +297,9 @@ POSTHOOK: Input: default@orc_merge5a_n1
POSTHOOK: Output: default@orc_merge5a_n1
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 1051 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 1054 ### HDFS DATE ### hdfs://### HDFS PATH ###
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 1051 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 1054 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: show partitions orc_merge5a_n1
PREHOOK: type: SHOWPARTITIONS
PREHOOK: Input: default@orc_merge5a_n1
@@ -334,7 +334,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -347,7 +347,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -472,9 +472,9 @@ POSTHOOK: Input: default@orc_merge5a_n1
POSTHOOK: Output: default@orc_merge5a_n1
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 1051 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 1054 ### HDFS DATE ### hdfs://### HDFS PATH ###
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 1051 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 1054 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: show partitions orc_merge5a_n1
PREHOOK: type: SHOWPARTITIONS
PREHOOK: Input: default@orc_merge5a_n1
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge7.q.out b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
index 05177f8..aa2f8bd 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n2
-POSTHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n2
-PREHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a_n0
-POSTHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a_n0
@@ -42,7 +42,7 @@ STAGE PLANS:
alias: orc_merge5_n2
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), subtype (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -81,22 +81,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -112,22 +112,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -211,7 +211,7 @@ STAGE PLANS:
alias: orc_merge5_n2
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), subtype (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -287,22 +287,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -318,22 +318,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -361,7 +361,7 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
Found 1 items
-rw-r--r-- 3 ### USER ### ### GROUP ### 614 ### HDFS DATE ### hdfs://### HDFS PATH ###
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 968 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 971 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: show partitions orc_merge5a_n0
PREHOOK: type: SHOWPARTITIONS
PREHOOK: Input: default@orc_merge5a_n0
@@ -402,22 +402,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -433,22 +433,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -574,7 +574,7 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
Found 1 items
-rw-r--r-- 3 ### USER ### ### GROUP ### 614 ### HDFS DATE ### hdfs://### HDFS PATH ###
Found 1 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 968 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 971 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: show partitions orc_merge5a_n0
PREHOOK: type: SHOWPARTITIONS
PREHOOK: Input: default@orc_merge5a_n0
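The orc_merge7 plans above project subtype twice, once as a data column and once as the partition value st, which is the shape of a dynamic-partition insert into orc_merge5a_n0 (partitioned by st double). The exact statements are not reproduced in this diff, so the following is only a sketch under that assumption:

-- Assumed shape of the insert behind the orc_merge7 golden files; not the verbatim test script.
SET hive.exec.dynamic.partition.mode=nonstrict;  -- allow a fully dynamic partition column
INSERT OVERWRITE TABLE orc_merge5a_n0 PARTITION (st)
SELECT userid, string1, subtype, decimal1, ts, subtype AS st
FROM orc_merge5_n2;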
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
index 72cccd8..f01c368 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n3
-POSTHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n3
-PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5b
@@ -45,7 +45,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -79,7 +79,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtyp
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -92,7 +92,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -105,7 +105,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -118,7 +118,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -131,7 +131,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -144,7 +144,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
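The orc_merge_incompat1 lineage above repeats once per load: one INSERT OVERWRITE followed by several INSERT INTO statements against orc_merge5b, each producing another small ORC file (the "incompat" in the test name presumably refers to files the fast stripe-level merge cannot concatenate). The statements are truncated in the POSTHOOK lines, so this is only a sketch of their general shape, assuming the userid <= 13 filter visible in the plan:

-- Assumed shape of the repeated loads; not the verbatim orc_merge_incompat1.q script.
INSERT OVERWRITE TABLE orc_merge5b
SELECT userid, string1, subtype, decimal1, ts FROM orc_merge5_n3 WHERE userid <= 13;

INSERT INTO TABLE orc_merge5b
SELECT userid, string1, subtype, decimal1, ts FROM orc_merge5_n3 WHERE userid <= 13;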
[26/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out b/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
index 2581311..ab083e7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_case_when_1.q.out
@@ -140,7 +140,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
@@ -182,7 +181,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
@@ -197,9 +195,6 @@ STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
- Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
@@ -210,11 +205,13 @@ STAGE PLANS:
expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP '), date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), null, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 10000), datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 10000), null, datediff(l_receiptdate, l_commitdate)) (type: int), if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
+ File Output Operator
+ compressed: false
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Execution mode: llap
LLAP IO: all inputs
Map Vectorization:
@@ -223,40 +220,6 @@ STAGE PLANS:
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
notVectorizedReason: SELECT operator: Unexpected hive type name void
vectorized: false
- Reducer 2
- Execution mode: vectorized, llap
- Reduce Vectorization:
- enabled: true
- enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
- reduceColumnNullOrder: a
- reduceColumnSortOrder: +
- allNative: false
- usesVectorUDFAdaptor: false
- vectorized: true
- rowBatchContext:
- dataColumnCount: 17
- dataColumns: KEY.reducesinkkey0:int, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string, VALUE._col3:date, VALUE._col4:double, VALUE._col5:double, VALUE._col6:decimal(10,2), VALUE._col7:decimal(10,2), VALUE._col8:decimal(12,2), VALUE._col9:decimal(12,2), VALUE._col10:decimal(10,2), VALUE._col11:decimal(10,2), VALUE._col12:timestamp, VALUE._col13:int, VALUE._col14:int, VALUE._col15:date
- partitionColumnCount: 0
- scratchColumnTypeNames: []
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Select Vectorization:
- className: VectorSelectOperator
- native: true
- projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
- Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- File Sink Vectorization:
- className: VectorFileSinkOperator
- native: false
- Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
@@ -303,7 +266,6 @@ PREHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
PREHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
@@ -346,112 +308,111 @@ POSTHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
POSTHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
quantity quantity_description quantity_description_2 quantity_description_3 expected_date field_1 field_2 field_3 field_4 field_5 field_6 field_7 field_8 field_9 field_10 field_11 field_12
-NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
-1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
1 Single Single Single 1994-01-31 0.0 0.0 0.05 0.05 0.05 0.05 0.05 0.05 1994-01-28 00:00:00 NULL -36 2009-01-01
-2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
-2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
-3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
-3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
-3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
-4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
-4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
-5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
-5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
-5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
-6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
-6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
-7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
-8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
-8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
-9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
11 Many Many NULL 1994-03-22 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1994-03-27 00:00:00 NULL 10 2009-01-01
12 Many Many NULL 1996-05-12 12655.998 12655.998 0.03 0.03 0.03 0.03 0.03 0.03 1996-06-03 00:00:00 NULL 82 2009-01-01
12 Many Many NULL 1997-02-01 12156.034800000001 12156.034800000001 0.05 NULL 0.05 0.00 0.05 0.00 1997-02-22 00:00:00 NULL 1 2009-01-01
-13 Many Many NULL 1998-10-28 17554.68 17554.68 0.07 NULL 0.07 0.00 0.07 0.00 1998-11-06 00:00:00 NULL 53 2009-01-01
13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
13 Many Many NULL 1994-03-08 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-03-26 00:00:00 NULL 41 2009-01-01
+13 Many Many NULL 1998-10-28 17554.68 17554.68 0.07 NULL 0.07 0.00 0.07 0.00 1998-11-06 00:00:00 NULL 53 2009-01-01
14 Many Many NULL 1995-01-04 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1995-01-27 00:00:00 NULL 66 2009-01-01
15 Many Many NULL 1994-11-05 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-11-20 00:00:00 NULL 81 2009-12-31
-17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
17 Many Many NULL 1994-07-07 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-07-03 00:00:00 NULL -4 2009-01-01
-19 Many Many NULL 1994-02-05 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1994-02-06 00:00:00 NULL -11 2009-01-01
+17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
19 Many Many NULL 1993-05-19 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1993-05-25 00:00:00 NULL 81 2009-01-01
+19 Many Many NULL 1994-02-05 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1994-02-06 00:00:00 NULL -11 2009-01-01
+2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
+2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
20 Many Many NULL 1998-07-02 32042.592 32042.592 0.01 NULL 0.01 0.00 0.01 0.00 1998-07-02 00:00:00 NULL 40 2009-01-01
-21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
21 Many Many NULL 1994-10-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-10-26 00:00:00 NULL 38 2009-01-01
-22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
+21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
22 Many Many NULL 1995-07-22 39353.82 39353.82 0.05 NULL 0.05 0.00 0.05 0.00 1995-07-19 00:00:00 NULL 45 2009-01-01
-23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
+22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
23 Many Many NULL 1994-07-24 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-07-25 00:00:00 NULL 26 2009-01-01
23 Many Many NULL 1994-10-13 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-10-24 00:00:00 NULL 79 2009-12-31
+23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
24 Many Many NULL 1996-02-26 31762.584 31762.584 0.00 0.00 0.00 0.00 0.00 0.00 1996-03-18 00:00:00 NULL 75 2009-01-01
24 Many Many NULL 1996-04-04 20542.032 20542.032 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-01 00:00:00 NULL 18 2009-12-31
25 Many Many NULL 1995-12-06 27263.995 27263.995 NULL NULL 0.00 0.00 0.00 0.00 1995-12-21 00:00:00 NULL -4 2009-01-01
25 Many Many NULL 1998-04-15 43064.1575 43064.1575 0.07 NULL 0.07 0.00 0.07 0.00 1998-04-11 00:00:00 NULL -11 2009-01-01
-26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
-26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
26 Many Many NULL 1993-11-03 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1993-11-04 00:00:00 NULL -44 2009-01-01
26 Many Many NULL 1994-10-21 0.0 0.0 0.08 NULL 0.08 0.00 0.08 0.00 1994-10-19 00:00:00 NULL 24 2009-01-01
-27 Many Many NULL 1998-06-29 45590.2425 45590.2425 NULL NULL 0.00 0.00 0.00 0.00 1998-06-29 00:00:00 NULL 4 2009-01-01
+26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
+26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
27 Many Many NULL 1994-01-26 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-01-23 00:00:00 NULL 62 2009-01-01
+27 Many Many NULL 1998-06-29 45590.2425 45590.2425 NULL NULL 0.00 0.00 0.00 0.00 1998-06-29 00:00:00 NULL 4 2009-01-01
28 Many Many NULL 1993-12-19 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-01-01 00:00:00 NULL -9 2009-01-01
-28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
28 Many Many NULL 1994-12-29 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1995-01-16 00:00:00 NULL 83 2009-01-01
28 Many Many NULL 1995-10-28 44866.219999999994 44866.219999999994 0.08 0.08 0.08 0.08 0.08 0.08 1995-10-26 00:00:00 NULL 60 2009-01-01
28 Many Many NULL 1996-02-06 45975.3616 45975.3616 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-28 00:00:00 NULL 66 2009-01-01
28 Many Many NULL 1996-03-26 30855.6612 30855.6612 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-20 00:00:00 NULL 12 2009-12-31
+28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
29 Many Many NULL 1997-01-30 39341.806 39341.806 NULL NULL 0.00 0.00 0.00 0.00 1997-01-27 00:00:00 NULL 0 2009-01-01
-30 Many Many NULL 1998-08-16 44561.46 44561.46 0.06 NULL 0.06 0.00 0.06 0.00 1998-08-14 00:00:00 NULL 34 2009-12-31
-30 Many Many NULL 1996-01-15 29770.173 29770.173 NULL NULL 0.00 0.00 0.00 0.00 1996-01-18 00:00:00 NULL 35 2009-12-31
+3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
+3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
+3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
30 Many Many NULL 1994-06-08 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-06-22 00:00:00 NULL 24 2009-01-01
+30 Many Many NULL 1996-01-15 29770.173 29770.173 NULL NULL 0.00 0.00 0.00 0.00 1996-01-18 00:00:00 NULL 35 2009-12-31
+30 Many Many NULL 1998-08-16 44561.46 44561.46 0.06 NULL 0.06 0.00 0.06 0.00 1998-08-14 00:00:00 NULL 34 2009-12-31
31 Many Many NULL 1993-11-03 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1993-11-08 00:00:00 NULL -41 2009-01-01
31 Many Many NULL 1994-02-24 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-02-20 00:00:00 NULL -19 2009-01-01
-32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
32 Many Many NULL 1993-12-14 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1993-12-28 00:00:00 NULL -7 2009-12-31
-32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
32 Many Many NULL 1994-08-29 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-08-31 00:00:00 NULL 14 2009-01-01
+32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
32 Many Many NULL 1996-02-04 46146.7488 46146.7488 NULL NULL 0.00 0.00 0.00 0.00 1996-02-03 00:00:00 NULL -4 2009-01-01
+32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
33 Many Many NULL 1998-04-17 54174.12 54174.12 0.01 NULL 0.01 0.00 0.01 0.00 1998-04-15 00:00:00 NULL 26 2009-01-01
-34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
34 Many Many NULL 1995-11-13 60586.5448 60586.5448 0.06 NULL 0.06 0.00 0.06 0.00 1995-11-26 00:00:00 NULL -50 2009-01-01
+34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
34 Many Many NULL 1998-03-10 56487.763199999994 56487.763199999994 NULL NULL 0.00 0.00 0.00 0.00 1998-03-30 00:00:00 NULL -23 2009-01-01
35 Many Many NULL 1996-01-21 40475.225 40475.225 0.03 0.03 0.03 0.03 0.03 0.03 1996-01-22 00:00:00 NULL -32 2009-01-01
36 Many Many NULL 1996-04-17 41844.6756 41844.6756 0.06 0.06 0.06 0.06 0.06 0.06 1996-04-20 00:00:00 NULL 52 2009-01-01
-37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
37 Many Many NULL 1992-05-02 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1992-05-02 00:00:00 NULL -13 2009-01-01
+37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
37 Many Many NULL 1994-02-18 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-02-21 00:00:00 NULL -23 2009-01-01
-38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
38 Many Many NULL 1996-02-16 68028.3144 68028.3144 NULL NULL 0.00 0.00 0.00 0.00 1996-02-18 00:00:00 NULL -6 2009-01-01
+38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
39 Many Many NULL 1992-07-07 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1992-07-28 00:00:00 NULL -21 2009-01-01
39 Many Many NULL 1998-02-03 45146.01 45146.01 NULL NULL 0.00 0.00 0.00 0.00 1998-02-18 00:00:00 NULL -48 2009-01-01
+4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
+4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
40 Many Many NULL 1992-07-26 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1992-08-15 00:00:00 NULL 14 2009-01-01
40 Many Many NULL 1996-12-13 51224.736 51224.736 0.05 NULL 0.05 0.00 0.05 0.00 1997-01-01 00:00:00 NULL 71 2009-01-01
+41 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-11 00:00:00 NULL -74 2009-01-01
41 Many Many NULL 1994-02-26 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-03-18 00:00:00 NULL 17 2009-01-01
41 Many Many NULL 1998-07-04 47989.6144 47989.6144 0.08 NULL 0.08 0.00 0.08 0.00 1998-07-06 00:00:00 NULL 9 2009-01-01
-41 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-11 00:00:00 NULL -74 2009-01-01
42 Many Many NULL 1994-08-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-28 00:00:00 NULL 33 2009-12-31
42 Many Many NULL 1996-02-13 68289.9672 68289.9672 0.00 NULL 0.00 0.00 0.00 0.00 1996-02-23 00:00:00 NULL 33 2009-01-01
-43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
43 Many Many NULL 1992-07-15 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1992-08-02 00:00:00 NULL 27 2009-01-01
-44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
+43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
44 Many Many NULL 1995-09-02 75106.658 75106.658 NULL NULL 0.00 0.00 0.00 0.00 1995-09-14 00:00:00 NULL 25 2009-01-01
44 Many Many NULL 1996-10-04 80882.4192 80882.4192 0.02 NULL 0.02 0.00 0.02 0.00 1996-09-30 00:00:00 NULL -48 2009-01-01
44 Many Many NULL 1996-11-19 48941.692800000004 48941.692800000004 0.06 NULL 0.06 0.00 0.06 0.00 1996-12-12 00:00:00 NULL -3 2009-01-01
+44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
45 Many Many NULL 1994-02-07 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-02-23 00:00:00 NULL 50 2009-01-01
45 Many Many NULL 1998-03-05 61489.35 61489.35 NULL NULL 0.00 0.00 0.00 0.00 1998-03-24 00:00:00 NULL 4 2009-01-01
46 Many Many NULL 1996-01-20 73475.892 73475.892 0.07 NULL 0.07 0.00 0.07 0.00 1996-02-03 00:00:00 NULL -53 2009-01-01
46 Many Many NULL 1996-10-01 77781.4092 77781.4092 NULL NULL 0.00 0.00 0.00 0.00 1996-10-26 00:00:00 NULL -54 2009-01-01
-46 Many Many NULL 1998-08-18 84565.5168 84565.5168 0.05 NULL 0.05 0.00 0.05 0.00 1998-08-29 00:00:00 NULL 52 2009-01-01
46 Many Many NULL 1998-07-01 56583.5144 56583.5144 0.05 NULL 0.05 0.00 0.05 0.00 1998-07-05 00:00:00 NULL 28 2009-01-01
+46 Many Many NULL 1998-08-18 84565.5168 84565.5168 0.05 NULL 0.05 0.00 0.05 0.00 1998-08-29 00:00:00 NULL 52 2009-01-01
48 Many Many NULL 1994-08-22 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-09-08 00:00:00 NULL 28 2009-01-01
49 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-24 00:00:00 NULL -26 2009-12-31
+5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
+5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
+5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
50 Many Many NULL 1994-08-13 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-26 00:00:00 NULL -48 2009-12-31
+6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
+6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
+7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
+8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
+8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
+9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
L_QUANTITY as Quantity,
@@ -492,7 +453,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
@@ -534,7 +494,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
@@ -549,9 +508,6 @@ STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
- Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
@@ -560,7 +516,7 @@ STAGE PLANS:
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2), 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2)/DECIMAL_64, 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP '), date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'D
ELIVER IN PERSON'), null, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 10000), datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 10000), null, datediff(l_receiptdate, l_commitdate)) (type: int), if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
@@ -568,70 +524,35 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [4, 22, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 38, 40, 43, 44]
- selectExpressions: IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 21:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 22:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 21:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprStringScalarStringScalar(col 20:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean) -> 21:string) -> 22:string) -> 21:string) -> 22:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 24:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean
, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprColumnNull(col 20:boolean, col 21:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean, ConstantVectorExpression(val Many) -> 21:string) -> 23:string) -> 24:string) -> 23:string) -> 24:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprNullNull(null, null) -> 23:string) -> 25:string) -> 23:string) -> 25:string, IfExprLongColumnLongColumn(col 17:boolean, col 18:date, col 19:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 17:boolean, VectorUDFDateAddColSca
lar(col 10:date, val 10) -> 18:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 19:date) -> 26:date, IfExprDoubleColumnLongScalar(col 17:boolean, col 28:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 27:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 27:double) -> 28:double) -> 27:double, IfExprDoubleColumnDoubleScalar(col 17:boolean, col 29:double, val 0.0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 28:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 28:double) -> 29:double) -> 28:double, IfExprNullColumn(col 17:boolean, null, col 7)(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 17:boolean, col 7:decimal(10,2)) -> 30:decimal(10,2), IfExprColumnN
ull(col 18:boolean, col 7:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 18:boolean, col 7:decimal(10,2)) -> 31:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 32:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 33:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar
(20)) -> 23:string) -> 19:boolean) -> 34:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 35:decimal(10,2), IfExprTimestampColumnColumn(col 19:boolean, col 36:timestampcol 37:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 19:boolean, CastDateToTimestamp(col 12:date) -> 36:timestamp, CastDateToTimestamp(col 11:date) -> 37:timestamp) -> 38:timestamp, IfExprColumnNull(col 19:boolean, col 39:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 19:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 39:int) -> 40:int, IfExprNullColumn(col 41:boolean, null, col 42)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 41:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 42:int) -> 43:int, IfExprLongScalarLongScala
r(col 45:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 44:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 44:int) -> 45:boolean) -> 44:date
+ selectExpressions: IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 21:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 22:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 21:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprStringScalarStringScalar(col 20:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean) -> 21:string) -> 22:string) -> 21:string) -> 22:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 24:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean
, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprColumnNull(col 20:boolean, col 21:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 20:boolean, ConstantVectorExpression(val Many) -> 21:string) -> 23:string) -> 24:string) -> 23:string) -> 24:string, IfExprStringScalarStringGroupColumn(col 17:boolean, val Singlecol 23:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, IfExprStringScalarStringGroupColumn(col 18:boolean, val Twocol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 18:boolean, IfExprStringScalarStringGroupColumn(col 19:boolean, val Somecol 23:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 19:boolean, IfExprNullNull(null, null) -> 23:string) -> 25:string) -> 23:string) -> 25:string, IfExprLongColumnLongColumn(col 17:boolean, col 18:date, col 19:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 17:boolean, VectorUDFDateAddColSca
lar(col 10:date, val 10) -> 18:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 19:date) -> 26:date, IfExprDoubleColumnLongScalar(col 17:boolean, col 28:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 27:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 27:double) -> 28:double) -> 27:double, IfExprDoubleColumnDoubleScalar(col 17:boolean, col 29:double, val 0.0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 17:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 28:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 28:double) -> 29:double) -> 28:double, IfExprNullColumn(col 17:boolean, null, col 46)(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 17:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_
64) -> 46:decimal(10,2)) -> 30:decimal(10,2), IfExprColumnNull(col 18:boolean, col 47:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 18:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_64) -> 47:decimal(10,2)) -> 31:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 32:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 33:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children:
StringGroupColEqualStringScalar(col 23:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 34:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 23:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 23:string) -> 19:boolean) -> 35:decimal(10,2), IfExprTimestampColumnColumn(col 19:boolean, col 36:timestampcol 37:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 19:boolean, CastDateToTimestamp(col 12:date) -> 36:timestamp, CastDateToTimestamp(col 11:date) -> 37:timestamp) -> 38:timestamp, IfExprColumnNull(col 19:boolean, col 39:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 19:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 39:int) -> 40:int, IfExprNullColumn(col 41:boolean, null, col 42)(children: LongColGreaterLongScalar(col 2:int, val 1
0000) -> 41:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 42:int) -> 43:int, IfExprLongScalarLongScalar(col 45:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 44:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 44:int) -> 45:boolean) -> 44:date
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Reduce Sink Vectorization:
- className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
- native: true
- nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
- valueColumnNums: [22, 24, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35, 38, 40, 43, 44]
+ File Output Operator
+ compressed: false
+ File Sink Vectorization:
+ className: VectorFileSinkOperator
+ native: false
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- allNative: true
+ allNative: false
usesVectorUDFAdaptor: true
vectorized: true
rowBatchContext:
dataColumnCount: 16
includeColumns: [1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14]
- dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2), l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
- partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, bigint, bigint, bigint, string, string, string, string, string, bigint, double, double, double, decimal(10,2), decimal(10,2), decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint]
- Reducer 2
- Execution mode: vectorized, llap
- Reduce Vectorization:
- enabled: true
- enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
- reduceColumnNullOrder: a
- reduceColumnSortOrder: +
- allNative: false
- usesVectorUDFAdaptor: false
- vectorized: true
- rowBatchContext:
- dataColumnCount: 17
- dataColumns: KEY.reducesinkkey0:int, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string, VALUE._col3:date, VALUE._col4:double, VALUE._col5:double, VALUE._col6:decimal(10,2), VALUE._col7:decimal(10,2), VALUE._col8:decimal(12,2), VALUE._col9:decimal(12,2), VALUE._col10:decimal(10,2), VALUE._col11:decimal(10,2), VALUE._col12:timestamp, VALUE._col13:int, VALUE._col14:int, VALUE._col15:date
+ dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2)/DECIMAL_64, l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
partitionColumnCount: 0
- scratchColumnTypeNames: []
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Select Vectorization:
- className: VectorSelectOperator
- native: true
- projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
- Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- File Sink Vectorization:
- className: VectorFileSinkOperator
- native: false
- Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ scratchColumnTypeNames: [bigint, bigint, bigint, bigint, string, string, string, string, string, bigint, double, double, double, decimal(10,2), decimal(10,2), decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint, decimal(10,2), decimal(10,2)]
Stage: Stage-0
Fetch Operator
@@ -678,7 +599,6 @@ PREHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
PREHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
@@ -721,112 +641,111 @@ POSTHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
POSTHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
quantity quantity_description quantity_description_2 quantity_description_3 expected_date field_1 field_2 field_3 field_4 field_5 field_6 field_7 field_8 field_9 field_10 field_11 field_12
-NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
-1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
1 Single Single Single 1994-01-31 0.0 0.0 0.05 0.05 0.05 0.05 0.05 0.05 1994-01-28 00:00:00 NULL -36 2009-01-01
-2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
-2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
-3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
-3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
-3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
-4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
-4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
-5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
-5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
-5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
-6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
-6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
-7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
-8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
-8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
-9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
11 Many Many NULL 1994-03-22 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1994-03-27 00:00:00 NULL 10 2009-01-01
12 Many Many NULL 1996-05-12 12655.998 12655.998 0.03 0.03 0.03 0.03 0.03 0.03 1996-06-03 00:00:00 NULL 82 2009-01-01
12 Many Many NULL 1997-02-01 12156.034800000001 12156.034800000001 0.05 NULL 0.05 0.00 0.05 0.00 1997-02-22 00:00:00 NULL 1 2009-01-01
-13 Many Many NULL 1998-10-28 17554.68 17554.68 0.07 NULL 0.07 0.00 0.07 0.00 1998-11-06 00:00:00 NULL 53 2009-01-01
13 Many Many NULL 1993-04-06 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1993-04-08 00:00:00 NULL 4 2009-01-01
13 Many Many NULL 1994-03-08 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-03-26 00:00:00 NULL 41 2009-01-01
+13 Many Many NULL 1998-10-28 17554.68 17554.68 0.07 NULL 0.07 0.00 0.07 0.00 1998-11-06 00:00:00 NULL 53 2009-01-01
14 Many Many NULL 1995-01-04 0.0 0.0 0.02 NULL 0.02 0.00 0.02 0.00 1995-01-27 00:00:00 NULL 66 2009-01-01
15 Many Many NULL 1994-11-05 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-11-20 00:00:00 NULL 81 2009-12-31
-17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
17 Many Many NULL 1994-07-07 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-07-03 00:00:00 NULL -4 2009-01-01
-19 Many Many NULL 1994-02-05 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1994-02-06 00:00:00 NULL -11 2009-01-01
+17 Many Many NULL 1996-03-18 20321.500799999998 20321.500799999998 NULL NULL 0.00 0.00 0.00 0.00 1996-03-22 00:00:00 NULL 39 2009-01-01
19 Many Many NULL 1993-05-19 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1993-05-25 00:00:00 NULL 81 2009-01-01
+19 Many Many NULL 1994-02-05 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1994-02-06 00:00:00 NULL -11 2009-01-01
+2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
+2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
20 Many Many NULL 1998-07-02 32042.592 32042.592 0.01 NULL 0.01 0.00 0.01 0.00 1998-07-02 00:00:00 NULL 40 2009-01-01
-21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
21 Many Many NULL 1994-10-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-10-26 00:00:00 NULL 38 2009-01-01
-22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
+21 Many Many NULL 1995-07-11 24640.0518 24640.0518 NULL NULL 0.00 0.00 0.00 0.00 1995-07-31 00:00:00 NULL 78 2009-01-01
22 Many Many NULL 1995-07-22 39353.82 39353.82 0.05 NULL 0.05 0.00 0.05 0.00 1995-07-19 00:00:00 NULL 45 2009-01-01
-23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
+22 Many Many NULL 1998-10-14 28405.0184 28405.0184 0.06 NULL 0.06 0.00 0.06 0.00 1998-10-12 00:00:00 NULL -4 2009-01-01
23 Many Many NULL 1994-07-24 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-07-25 00:00:00 NULL 26 2009-01-01
23 Many Many NULL 1994-10-13 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-10-24 00:00:00 NULL 79 2009-12-31
+23 Many Many NULL 1997-04-24 33946.3785 33946.3785 NULL NULL 0.00 0.00 0.00 0.00 1997-05-06 00:00:00 NULL 81 2009-01-01
24 Many Many NULL 1996-02-26 31762.584 31762.584 0.00 0.00 0.00 0.00 0.00 0.00 1996-03-18 00:00:00 NULL 75 2009-01-01
24 Many Many NULL 1996-04-04 20542.032 20542.032 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-01 00:00:00 NULL 18 2009-12-31
25 Many Many NULL 1995-12-06 27263.995 27263.995 NULL NULL 0.00 0.00 0.00 0.00 1995-12-21 00:00:00 NULL -4 2009-01-01
25 Many Many NULL 1998-04-15 43064.1575 43064.1575 0.07 NULL 0.07 0.00 0.07 0.00 1998-04-11 00:00:00 NULL -11 2009-01-01
-26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
-26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
26 Many Many NULL 1993-11-03 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1993-11-04 00:00:00 NULL -44 2009-01-01
26 Many Many NULL 1994-10-21 0.0 0.0 0.08 NULL 0.08 0.00 0.08 0.00 1994-10-19 00:00:00 NULL 24 2009-01-01
-27 Many Many NULL 1998-06-29 45590.2425 45590.2425 NULL NULL 0.00 0.00 0.00 0.00 1998-06-29 00:00:00 NULL 4 2009-01-01
+26 Many Many NULL 1995-04-25 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1995-05-13 00:00:00 NULL 18 2009-01-01
+26 Many Many NULL 1996-11-09 39912.433600000004 39912.433600000004 0.04 NULL 0.04 0.00 0.04 0.00 1996-11-20 00:00:00 NULL 31 2009-01-01
27 Many Many NULL 1994-01-26 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-01-23 00:00:00 NULL 62 2009-01-01
+27 Many Many NULL 1998-06-29 45590.2425 45590.2425 NULL NULL 0.00 0.00 0.00 0.00 1998-06-29 00:00:00 NULL 4 2009-01-01
28 Many Many NULL 1993-12-19 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1994-01-01 00:00:00 NULL -9 2009-01-01
-28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
28 Many Many NULL 1994-12-29 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1995-01-16 00:00:00 NULL 83 2009-01-01
28 Many Many NULL 1995-10-28 44866.219999999994 44866.219999999994 0.08 0.08 0.08 0.08 0.08 0.08 1995-10-26 00:00:00 NULL 60 2009-01-01
28 Many Many NULL 1996-02-06 45975.3616 45975.3616 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-28 00:00:00 NULL 66 2009-01-01
28 Many Many NULL 1996-03-26 30855.6612 30855.6612 0.04 NULL 0.04 0.00 0.04 0.00 1996-04-20 00:00:00 NULL 12 2009-12-31
+28 Many Many NULL 1996-04-26 26349.6324 26349.6324 0.06 NULL 0.06 0.00 0.06 0.00 1996-05-16 00:00:00 NULL 47 2009-01-01
29 Many Many NULL 1997-01-30 39341.806 39341.806 NULL NULL 0.00 0.00 0.00 0.00 1997-01-27 00:00:00 NULL 0 2009-01-01
-30 Many Many NULL 1998-08-16 44561.46 44561.46 0.06 NULL 0.06 0.00 0.06 0.00 1998-08-14 00:00:00 NULL 34 2009-12-31
-30 Many Many NULL 1996-01-15 29770.173 29770.173 NULL NULL 0.00 0.00 0.00 0.00 1996-01-18 00:00:00 NULL 35 2009-12-31
+3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
+3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
+3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
30 Many Many NULL 1994-06-08 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-06-22 00:00:00 NULL 24 2009-01-01
+30 Many Many NULL 1996-01-15 29770.173 29770.173 NULL NULL 0.00 0.00 0.00 0.00 1996-01-18 00:00:00 NULL 35 2009-12-31
+30 Many Many NULL 1998-08-16 44561.46 44561.46 0.06 NULL 0.06 0.00 0.06 0.00 1998-08-14 00:00:00 NULL 34 2009-12-31
31 Many Many NULL 1993-11-03 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1993-11-08 00:00:00 NULL -41 2009-01-01
31 Many Many NULL 1994-02-24 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-02-20 00:00:00 NULL -19 2009-01-01
-32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
32 Many Many NULL 1993-12-14 0.0 0.0 0.05 NULL 0.05 0.00 0.05 0.00 1993-12-28 00:00:00 NULL -7 2009-12-31
-32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
32 Many Many NULL 1994-08-29 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-08-31 00:00:00 NULL 14 2009-01-01
+32 Many Many NULL 1995-08-19 63313.3312 63313.3312 0.00 NULL 0.00 0.00 0.00 0.00 1995-08-27 00:00:00 NULL -41 2009-01-01
32 Many Many NULL 1996-02-04 46146.7488 46146.7488 NULL NULL 0.00 0.00 0.00 0.00 1996-02-03 00:00:00 NULL -4 2009-01-01
+32 Many Many NULL 1996-10-07 44955.15839999999 44955.15839999999 0.05 NULL 0.05 0.00 0.05 0.00 1996-10-14 00:00:00 NULL -66 2009-12-31
33 Many Many NULL 1998-04-17 54174.12 54174.12 0.01 NULL 0.01 0.00 0.01 0.00 1998-04-15 00:00:00 NULL 26 2009-01-01
-34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
34 Many Many NULL 1995-11-13 60586.5448 60586.5448 0.06 NULL 0.06 0.00 0.06 0.00 1995-11-26 00:00:00 NULL -50 2009-01-01
+34 Many Many NULL 1996-01-27 63982.002400000005 63982.002400000005 NULL NULL 0.00 0.00 0.00 0.00 1996-01-27 00:00:00 NULL 21 2009-01-01
34 Many Many NULL 1998-03-10 56487.763199999994 56487.763199999994 NULL NULL 0.00 0.00 0.00 0.00 1998-03-30 00:00:00 NULL -23 2009-01-01
35 Many Many NULL 1996-01-21 40475.225 40475.225 0.03 0.03 0.03 0.03 0.03 0.03 1996-01-22 00:00:00 NULL -32 2009-01-01
36 Many Many NULL 1996-04-17 41844.6756 41844.6756 0.06 0.06 0.06 0.06 0.06 0.06 1996-04-20 00:00:00 NULL 52 2009-01-01
-37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
37 Many Many NULL 1992-05-02 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1992-05-02 00:00:00 NULL -13 2009-01-01
+37 Many Many NULL 1993-04-23 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1993-04-14 00:00:00 NULL 15 2009-12-31
37 Many Many NULL 1994-02-18 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-02-21 00:00:00 NULL -23 2009-01-01
-38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
38 Many Many NULL 1996-02-16 68028.3144 68028.3144 NULL NULL 0.00 0.00 0.00 0.00 1996-02-18 00:00:00 NULL -6 2009-01-01
+38 Many Many NULL 1997-02-02 44694.46 44694.46 0.05 0.05 0.05 0.05 0.05 0.05 1997-02-02 00:00:00 NULL 19 2009-01-01
39 Many Many NULL 1992-07-07 0.0 0.0 0.02 0.02 0.02 0.02 0.02 0.02 1992-07-28 00:00:00 NULL -21 2009-01-01
39 Many Many NULL 1998-02-03 45146.01 45146.01 NULL NULL 0.00 0.00 0.00 0.00 1998-02-18 00:00:00 NULL -48 2009-01-01
+4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03 NULL 0.03 0.00 0.03 0.00 1995-09-03 00:00:00 NULL -28 2009-01-01
+4 Some Some Some 1997-04-27 5669.7732000000005 5669.7732000000005 0.04 NULL 0.04 0.00 0.04 0.00 1997-04-20 00:00:00 NULL 79 2009-01-01
40 Many Many NULL 1992-07-26 0.0 0.0 0.03 NULL 0.03 0.00 0.03 0.00 1992-08-15 00:00:00 NULL 14 2009-01-01
40 Many Many NULL 1996-12-13 51224.736 51224.736 0.05 NULL 0.05 0.00 0.05 0.00 1997-01-01 00:00:00 NULL 71 2009-01-01
+41 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-11 00:00:00 NULL -74 2009-01-01
41 Many Many NULL 1994-02-26 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-03-18 00:00:00 NULL 17 2009-01-01
41 Many Many NULL 1998-07-04 47989.6144 47989.6144 0.08 NULL 0.08 0.00 0.08 0.00 1998-07-06 00:00:00 NULL 9 2009-01-01
-41 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-11 00:00:00 NULL -74 2009-01-01
42 Many Many NULL 1994-08-05 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-28 00:00:00 NULL 33 2009-12-31
42 Many Many NULL 1996-02-13 68289.9672 68289.9672 0.00 NULL 0.00 0.00 0.00 0.00 1996-02-23 00:00:00 NULL 33 2009-01-01
-43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
43 Many Many NULL 1992-07-15 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1992-08-02 00:00:00 NULL 27 2009-01-01
-44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
+43 Many Many NULL 1996-10-22 62727.3207 62727.3207 0.01 NULL 0.01 0.00 0.01 0.00 1996-10-26 00:00:00 NULL -19 2009-12-31
44 Many Many NULL 1995-09-02 75106.658 75106.658 NULL NULL 0.00 0.00 0.00 0.00 1995-09-14 00:00:00 NULL 25 2009-01-01
44 Many Many NULL 1996-10-04 80882.4192 80882.4192 0.02 NULL 0.02 0.00 0.02 0.00 1996-09-30 00:00:00 NULL -48 2009-01-01
44 Many Many NULL 1996-11-19 48941.692800000004 48941.692800000004 0.06 NULL 0.06 0.00 0.06 0.00 1996-12-12 00:00:00 NULL -3 2009-01-01
+44 Many Many NULL 1997-03-23 60781.124800000005 60781.124800000005 NULL NULL 0.00 0.00 0.00 0.00 1997-04-13 00:00:00 NULL 74 2009-12-31
45 Many Many NULL 1994-02-07 0.0 0.0 0.00 NULL 0.00 0.00 0.00 0.00 1994-02-23 00:00:00 NULL 50 2009-01-01
45 Many Many NULL 1998-03-05 61489.35 61489.35 NULL NULL 0.00 0.00 0.00 0.00 1998-03-24 00:00:00 NULL 4 2009-01-01
46 Many Many NULL 1996-01-20 73475.892 73475.892 0.07 NULL 0.07 0.00 0.07 0.00 1996-02-03 00:00:00 NULL -53 2009-01-01
46 Many Many NULL 1996-10-01 77781.4092 77781.4092 NULL NULL 0.00 0.00 0.00 0.00 1996-10-26 00:00:00 NULL -54 2009-01-01
-46 Many Many NULL 1998-08-18 84565.5168 84565.5168 0.05 NULL 0.05 0.00 0.05 0.00 1998-08-29 00:00:00 NULL 52 2009-01-01
46 Many Many NULL 1998-07-01 56583.5144 56583.5144 0.05 NULL 0.05 0.00 0.05 0.00 1998-07-05 00:00:00 NULL 28 2009-01-01
+46 Many Many NULL 1998-08-18 84565.5168 84565.5168 0.05 NULL 0.05 0.00 0.05 0.00 1998-08-29 00:00:00 NULL 52 2009-01-01
48 Many Many NULL 1994-08-22 0.0 0.0 0.07 NULL 0.07 0.00 0.07 0.00 1994-09-08 00:00:00 NULL 28 2009-01-01
49 Many Many NULL 1993-11-14 0.0 0.0 0.00 0.00 0.00 0.00 0.00 0.00 1993-11-24 00:00:00 NULL -26 2009-12-31
+5 Some Some Some 1993-12-14 0.0 0.0 0.03 0.03 0.03 0.03 0.03 0.03 1993-12-23 00:00:00 NULL -2 2009-01-01
+5 Some Some Some 1996-02-15 6217.103999999999 6217.103999999999 0.02 NULL 0.02 0.00 0.02 0.00 1996-02-13 00:00:00 NULL -42 2009-01-01
+5 Some Some Some 1997-02-25 8116.96 8116.96 NULL NULL 0.00 0.00 0.00 0.00 1997-02-21 00:00:00 NULL 9 2009-01-01
50 Many Many NULL 1994-08-13 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-08-26 00:00:00 NULL -48 2009-12-31
+6 Some Some Some 1995-07-26 8793.2736 8793.2736 0.03 NULL 0.03 0.00 0.03 0.00 1995-07-25 00:00:00 NULL -60 2009-01-01
+6 Some Some Some 1998-11-04 9487.6152 9487.6152 0.06 NULL 0.06 0.00 0.06 0.00 1998-11-05 00:00:00 NULL 46 2009-12-31
+7 Some Some Some 1996-01-24 12613.136199999999 12613.136199999999 0.04 NULL 0.04 0.00 0.04 0.00 1996-01-29 00:00:00 NULL 38 2009-01-01
+8 Some Some Some 1994-01-17 0.0 0.0 0.08 0.08 0.08 0.08 0.08 0.08 1994-01-14 00:00:00 NULL -44 2009-01-01
+8 Some Some Some 1996-02-03 11978.640000000001 11978.640000000001 0.02 0.02 0.02 0.02 0.02 0.02 1996-01-31 00:00:00 NULL -34 2009-01-01
+9 Some Some Some 1996-02-11 10666.6272 10666.6272 0.08 0.08 0.08 0.08 0.08 0.08 1996-02-19 00:00:00 NULL -12 2009-01-01
+NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
L_QUANTITY as Quantity,
@@ -867,7 +786,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT
@@ -909,7 +827,6 @@ SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
Explain
PLAN VECTORIZATION:
@@ -924,9 +841,6 @@ STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
- Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE)
-#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
@@ -935,7 +849,7 @@ STAGE PLANS:
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2), 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:l_orderkey:int, 1:l_partkey:int, 2:l_suppkey:int, 3:l_linenumber:int, 4:l_quantity:int, 5:l_extendedprice:double, 6:l_discount:double, 7:l_tax:decimal(10,2)/DECIMAL_64, 8:l_returnflag:char(1), 9:l_linestatus:char(1), 10:l_shipdate:date, 11:l_commitdate:date, 12:l_receiptdate:date, 13:l_shipinstruct:varchar(20), 14:l_shipmode:char(10), 15:l_comment:string, 16:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: l_quantity (type: int), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE ('Huge number') END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN ('Many') ELSE (null) END (type: string), CASE WHEN ((l_quantity = 1)) THEN ('Single') WHEN ((l_quantity = 2)) THEN ('Two') WHEN ((l_quantity < 10)) THEN ('Some') WHEN ((l_quantity < 100)) THEN (null) ELSE (null) END (type: string), if((l_shipmode = 'SHIP '), date_add(l_shipdate, 10), date_add(l_shipdate, 5)) (type: date), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0) END (type: double), CASE WHEN ((l_returnflag = 'N')) THEN ((l_extendedprice * (1.0D - l_discount))) ELSE (0.0D) END (type: double), if((UDFToString(l_shipinstruct) = 'D
ELIVER IN PERSON'), null, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, null) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(12,2)), if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax) (type: decimal(10,2)), if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0) (type: decimal(10,2)), if((l_partkey > 30), CAST( l_receiptdate AS TIMESTAMP), CAST( l_commitdate AS TIMESTAMP)) (type: timestamp), if((l_suppkey > 10000), datediff(l_receiptdate, l_commitdate), null) (type: int), if((l_suppkey > 10000), null, datediff(l_receiptdate, l_commitdate)) (type: int), if(((l_suppkey % 500) > 100), DATE'2009-01-01', DATE'2009-12-31') (type: date)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
@@ -943,70 +857,35 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [4, 27, 38, 48, 52, 54, 60, 63, 65, 67, 68, 69, 70, 73, 76, 79, 80]
- selectExpressions: IfExprColumnCondExpr(col 17:boolean, col 18:stringcol 26:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, ConstantVectorExpression(val Single) -> 18:string, IfExprColumnCondExpr(col 19:boolean, col 20:stringcol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 19:boolean, ConstantVectorExpression(val Two) -> 20:string, IfExprColumnCondExpr(col 21:boolean, col 22:stringcol 24:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 21:boolean, ConstantVectorExpression(val Some) -> 22:string, IfExprStringScalarStringScalar(col 23:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 23:boolean) -> 24:string) -> 25:string) -> 26:string) -> 27:string, IfExprColumnCondExpr(col 23:boolean, col 28:stringcol 37:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 23:boolean, ConstantVectorExpression(val Single) -> 28:string, IfExprColumnCondExpr(col 29:boolean, col 30:stringcol 36:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 29:boolean, ConstantVectorExpression(val Two) -> 30:string, IfExprColumnCondExpr(col 31:boolean, col 32:stringcol 35:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 31:boolean, ConstantVectorExpression(val Some) -> 32:string, IfExprColumnNull(col 33:boolean, col 34:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 33:boolean, ConstantVectorExpression(val Many) -> 34:string) -> 35:string) -> 36:string) -> 37:string) -> 38:string, IfExprColumnCondExpr(col 39:boolean, col 40:stringcol 47:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 39:boolean, ConstantVectorExpression(val Single) -> 40:string, IfExprColumnCondExpr(col 41:boolean, col 42:stringcol 46:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 41:boolean, ConstantVectorExpression(val Two) -> 42:string, IfExprColumnCondExpr(col 43:boolean, col 44:stringcol 45:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 43:boolean, ConstantVectorExpression(val Some) -> 44:string, IfExprNullNull(null, null) -> 45:string) -> 46:string) -> 47:string) -> 48:string, IfExprCondExprCondExpr(col 49:boolean, col 50:datecol 51:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 49:boolean, VectorUDFDateAddColScalar(col 10:date, val 10) -> 50:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 51:date) -> 52:date, IfExprDoubleColumnLongScalar(col 57:boolean, col 58:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 54:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 54:double) -> 58:double) -> 54:double, IfExprCondExprColumn(col 57:boolean, col 59:double, col 58:double)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 58:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 58:double) -> 59:double, ConstantVectorExpression(val 0.0) -> 58:double) -> 60:double, IfExprNullColumn(col 62:boolean, null, col 7)(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 62:boolean, col 7:decimal(10,2)) -> 63:decimal(10,2), IfExprColumnNull(col 64:boolean, col 7:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 64:boolean, col 7:decimal(10,2)) -> 65:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 67:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 68:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 69:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 70:decimal(10,2), IfExprCondExprCondExpr(col 66:boolean, col 71:timestampcol 72:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 66:boolean, CastDateToTimestamp(col 12:date) -> 71:timestamp, CastDateToTimestamp(col 11:date) -> 72:timestamp) -> 73:timestamp, IfExprCondExprNull(col 74:boolean, col 75:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 74:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 75:int) -> 76:int, IfExprNullCondExpr(col 77:boolean, null, col 78:int)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 77:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 78:int) -> 79:int, IfExprLongScalarLongScalar(col 81:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 80:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 80:int) -> 81:boolean) -> 80:date
+ selectExpressions: IfExprColumnCondExpr(col 17:boolean, col 18:stringcol 26:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 17:boolean, ConstantVectorExpression(val Single) -> 18:string, IfExprColumnCondExpr(col 19:boolean, col 20:stringcol 25:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 19:boolean, ConstantVectorExpression(val Two) -> 20:string, IfExprColumnCondExpr(col 21:boolean, col 22:stringcol 24:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 21:boolean, ConstantVectorExpression(val Some) -> 22:string, IfExprStringScalarStringScalar(col 23:boolean, val Many, val Huge number)(children: LongColLessLongScalar(col 4:int, val 100) -> 23:boolean) -> 24:string) -> 25:string) -> 26:string) -> 27:string, IfExprColumnCondExpr(col 23:boolean, col 28:stringcol 37:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 23:boolean, ConstantVectorExpression(val Single) -> 28:string, IfExprColumnCondExpr(col 29:boolean, col 30:stringcol 36:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 29:boolean, ConstantVectorExpression(val Two) -> 30:string, IfExprColumnCondExpr(col 31:boolean, col 32:stringcol 35:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 31:boolean, ConstantVectorExpression(val Some) -> 32:string, IfExprColumnNull(col 33:boolean, col 34:string, null)(children: LongColLessLongScalar(col 4:int, val 100) -> 33:boolean, ConstantVectorExpression(val Many) -> 34:string) -> 35:string) -> 36:string) -> 37:string) -> 38:string, IfExprColumnCondExpr(col 39:boolean, col 40:stringcol 47:string)(children: LongColEqualLongScalar(col 4:int, val 1) -> 39:boolean, ConstantVectorExpression(val Single) -> 40:string, IfExprColumnCondExpr(col 41:boolean, col 42:stringcol 46:string)(children: LongColEqualLongScalar(col 4:int, val 2) -> 41:boolean, ConstantVectorExpression(val Two) -> 42:string, IfExprColumnCondExpr(col 43:boolean, col 44:stringcol 45:string)(children: LongColLessLongScalar(col 4:int, val 10) -> 43:boolean, ConstantVectorExpression(val Some) -> 44:string, IfExprNullNull(null, null) -> 45:string) -> 46:string) -> 47:string) -> 48:string, IfExprCondExprCondExpr(col 49:boolean, col 50:datecol 51:date)(children: StringGroupColEqualCharScalar(col 14:char(10), val SHIP) -> 49:boolean, VectorUDFDateAddColScalar(col 10:date, val 10) -> 50:date, VectorUDFDateAddColScalar(col 10:date, val 5) -> 51:date) -> 52:date, IfExprDoubleColumnLongScalar(col 57:boolean, col 58:double, val 0)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 54:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 54:double) -> 58:double) -> 54:double, IfExprCondExprColumn(col 57:boolean, col 59:double, col 58:double)(children: StringGroupColEqualCharScalar(col 8:char(1), val N) -> 57:boolean, DoubleColMultiplyDoubleColumn(col 5:double, col 58:double)(children: DoubleScalarSubtractDoubleColumn(val 1.0, col 6:double) -> 58:double) -> 59:double, ConstantVectorExpression(val 0.0) -> 58:double) -> 60:double, IfExprNullColumn(col 62:boolean, null, col 82)(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 62:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_64) -> 82:decimal(10,2)) -> 63:decimal(10,2), IfExprColumnNull(col 64:boolean, col 83:decimal(10,2), null)(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 64:boolean, ConvertDecimal64ToDecimal(col 7:decimal(10,2)/DECIMAL_64) -> 83:decimal(10,2)) -> 65:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 67:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 68:decimal(12,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'DELIVER IN PERSON'), 0, l_tax))(children: StringGroupColEqualStringScalar(col 61:string, val DELIVER IN PERSON)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 69:decimal(10,2), VectorUDFAdaptor(if((UDFToString(l_shipinstruct) = 'TAKE BACK RETURN'), l_tax, 0))(children: StringGroupColEqualStringScalar(col 61:string, val TAKE BACK RETURN)(children: CastStringGroupToString(col 13:varchar(20)) -> 61:string) -> 66:boolean) -> 70:decimal(10,2), IfExprCondExprCondExpr(col 66:boolean, col 71:timestampcol 72:timestamp)(children: LongColGreaterLongScalar(col 1:int, val 30) -> 66:boolean, CastDateToTimestamp(col 12:date) -> 71:timestamp, CastDateToTimestamp(col 11:date) -> 72:timestamp) -> 73:timestamp, IfExprCondExprNull(col 74:boolean, col 75:int, null)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 74:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 75:int) -> 76:int, IfExprNullCondExpr(col 77:boolean, null, col 78:int)(children: LongColGreaterLongScalar(col 2:int, val 10000) -> 77:boolean, VectorUDFDateDiffColCol(col 12:date, col 11:date) -> 78:int) -> 79:int, IfExprLongScalarLongScalar(col 81:boolean, val 14245, val 14609)(children: LongColGreaterLongScalar(col 80:int, val 100)(children: LongColModuloLongScalar(col 2:int, val 500) -> 80:int) -> 81:boolean) -> 80:date
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Reduce Sink Vectorization:
- className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
- native: true
- nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
- valueColumnNums: [27, 38, 48, 52, 54, 60, 63, 65, 67, 68, 69, 70, 73, 76, 79, 80]
+ File Output Operator
+ compressed: false
+ File Sink Vectorization:
+ className: VectorFileSinkOperator
+ native: false
Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: date), _col5 (type: double), _col6 (type: double), _col7 (type: decimal(10,2)), _col8 (type: decimal(10,2)), _col9 (type: decimal(12,2)), _col10 (type: decimal(12,2)), _col11 (type: decimal(10,2)), _col12 (type: decimal(10,2)), _col13 (type: timestamp), _col14 (type: int), _col15 (type: int), _col16 (type: date)
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- allNative: true
+ allNative: false
usesVectorUDFAdaptor: true
vectorized: true
rowBatchContext:
dataColumnCount: 16
includeColumns: [1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14]
- dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2), l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
- partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, bigint, bigint, bigint, bigint, double, double, bigint, bigint, double, double, double, string, bigint, decimal(10,2), bigint, decimal(10,2), bigint, decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint]
- Reducer 2
- Execution mode: vectorized, llap
- Reduce Vectorization:
- enabled: true
- enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
- reduceColumnNullOrder: a
- reduceColumnSortOrder: +
- allNative: false
- usesVectorUDFAdaptor: false
- vectorized: true
- rowBatchContext:
- dataColumnCount: 17
- dataColumns: KEY.reducesinkkey0:int, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string, VALUE._col3:date, VALUE._col4:double, VALUE._col5:double, VALUE._col6:decimal(10,2), VALUE._col7:decimal(10,2), VALUE._col8:decimal(12,2), VALUE._col9:decimal(12,2), VALUE._col10:decimal(10,2), VALUE._col11:decimal(10,2), VALUE._col12:timestamp, VALUE._col13:int, VALUE._col14:int, VALUE._col15:date
+ dataColumns: l_orderkey:int, l_partkey:int, l_suppkey:int, l_linenumber:int, l_quantity:int, l_extendedprice:double, l_discount:double, l_tax:decimal(10,2)/DECIMAL_64, l_returnflag:char(1), l_linestatus:char(1), l_shipdate:date, l_commitdate:date, l_receiptdate:date, l_shipinstruct:varchar(20), l_shipmode:char(10), l_comment:string
partitionColumnCount: 0
- scratchColumnTypeNames: []
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: date), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: decimal(10,2)), VALUE._col7 (type: decimal(10,2)), VALUE._col8 (type: decimal(12,2)), VALUE._col9 (type: decimal(12,2)), VALUE._col10 (type: decimal(10,2)), VALUE._col11 (type: decimal(10,2)), VALUE._col12 (type: timestamp), VALUE._col13 (type: int), VALUE._col14 (type: int), VALUE._col15 (type: date)
- outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
- Select Vectorization:
- className: VectorSelectOperator
- native: true
- projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
- Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- File Sink Vectorization:
- className: VectorFileSinkOperator
- native: false
- Statistics: Num rows: 101 Data size: 57327 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ scratchColumnTypeNames: [bigint, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, string, bigint, string, bigint, string, string, string, string, string, bigint, bigint, bigint, bigint, bigint, double, double, bigint, bigint, double, double, double, string, bigint, decimal(10,2), bigint, decimal(10,2), bigint, decimal(12,2), decimal(12,2), decimal(10,2), decimal(10,2), timestamp, timestamp, timestamp, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, decimal(10,2), decimal(10,2)]
Stage: Stage-0
Fetch Operator
@@ -1053,7 +932,6 @@ PREHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
PREHOOK: type: QUERY
PREHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
@@ -1096,109 +974,108 @@ POSTHOOK: query: SELECT
IF(L_SUPPKEY > 10000, NULL, DATEDIFF(L_RECEIPTDATE, L_COMMITDATE)) AS Field_11,
IF(L_SUPPKEY % 500 > 100, DATE_ADD('2008-12-31', 1), DATE_ADD('2008-12-31', 365)) AS Field_12
FROM lineitem_test
-ORDER BY Quantity
POSTHOOK: type: QUERY
POSTHOOK: Input: default@lineitem_test
#### A masked pattern was here ####
quantity quantity_description quantity_description_2 quantity_description_3 expected_date field_1 field_2 field_3 field_4 field_5 field_6 field_7 field_8 field_9 field_10 field_11 field_12
-NULL Huge number NULL NULL NULL 0.0 0.0 NULL NULL NULL 0.00 NULL 0.00 NULL NULL NULL 2009-12-31
-1 Single Single Single 1994-12-06 0.0 0.0 NULL NULL 0.00 0.00 0.00 0.00 1994-12-15 00:00:00 NULL 3 2009-01-01
1 Single Single Single 1994-01-31 0.0 0.0 0.05 0.05 0.05 0.05 0.05 0.05 1994-01-28 00:00:00 NULL -36 2009-01-01
-2 Two Two Two 1993-12-09 0.0 0.0 0.06 NULL 0.06 0.00 0.06 0.00 1994-01-01 00:00:00 NULL -6 2009-01-01
-2 Two Two Two 1995-08-12 2011.3912000000003 2011.3912000000003 NULL NULL 0.00 0.00 0.00 0.00 1995-08-23 00:00:00 NULL -45 2009-01-01
-3 Some Some Some 1998-07-09 2778.921 2778.921 0.02 NULL 0.02 0.00 0.02 0.00 1998-07-21 00:00:00 NULL 46 2009-12-31
-3 Some Some Some 1998-06-02 5137.6143 5137.6143 0.07 NULL 0.07 0.00 0.07 0.00 1998-06-02 00:00:00 NULL 60 2009-01-01
-3 Some Some Some 1994-06-11 0.0 0.0 0.04 NULL 0.04 0.00 0.04 0.00 1994-06-15 00:00:00 NULL -42 2009-12-31
-4 Some Some Some 1995-08-09 5990.4936 5990.4936 0.03
<TRUNCATED>
[08/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
index 395939a..c35156e 100644
--- a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
@@ -166,8 +166,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
index bc9d102..c36c9ec 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -178,8 +178,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -262,8 +262,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -331,8 +331,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index 16c1650..ecac4da 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -269,8 +269,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -338,8 +338,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -445,8 +445,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -512,8 +512,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -711,8 +711,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -752,8 +752,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -851,8 +851,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index bd8e1a2..92ad63e 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -285,8 +285,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -326,8 +326,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -425,8 +425,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
index 4504a74..cee7995 100644
--- a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
@@ -154,8 +154,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -376,8 +376,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
index c6e17ab..2e5cb46 100644
--- a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
@@ -172,8 +172,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
index ee74d1f..c906d0a 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
@@ -72,8 +72,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -251,8 +251,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -419,8 +419,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -578,8 +578,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -757,8 +757,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -925,8 +925,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1084,8 +1084,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1263,8 +1263,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1431,8 +1431,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1636,8 +1636,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_1.q.out b/ql/src/test/results/clientpositive/spark/vectorization_1.q.out
index 5303fe4..13cc510 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_1.q.out
@@ -105,8 +105,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_10.q.out b/ql/src/test/results/clientpositive/spark/vectorization_10.q.out
index a2e5786..23a04b8 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_10.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_10.q.out
@@ -97,8 +97,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_11.q.out b/ql/src/test/results/clientpositive/spark/vectorization_11.q.out
index dcf7c3e..b47433d 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_11.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_11.q.out
@@ -79,8 +79,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_12.q.out b/ql/src/test/results/clientpositive/spark/vectorization_12.q.out
index 1c302e1..9592735 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_12.q.out
@@ -132,8 +132,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
index 0f1b228..241098e 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
@@ -134,8 +134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -484,8 +484,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
index 5266764..b2b7707a 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
@@ -134,8 +134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
index 5d51c87..835ab5f 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
@@ -130,8 +130,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
index 56bbb89..ebd3ddf 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
@@ -107,8 +107,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
index 1877fb3..5091187 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
@@ -100,8 +100,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_2.q.out b/ql/src/test/results/clientpositive/spark/vectorization_2.q.out
index 1af403a..43e5074 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_2.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_3.q.out b/ql/src/test/results/clientpositive/spark/vectorization_3.q.out
index 51d04cc..ec6de13 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_3.q.out
@@ -114,8 +114,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_4.q.out b/ql/src/test/results/clientpositive/spark/vectorization_4.q.out
index 07d30cf..3c5084e 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_4.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_5.q.out b/ql/src/test/results/clientpositive/spark/vectorization_5.q.out
index 0fb430e..e3497af 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_5.q.out
@@ -102,8 +102,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_6.q.out b/ql/src/test/results/clientpositive/spark/vectorization_6.q.out
index bdb014c..600926c 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_6.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_6.q.out
@@ -91,8 +91,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
index 56bbb89..ebd3ddf 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
@@ -107,8 +107,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
index a0631c3..9209d48 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
@@ -71,8 +71,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
index e6a0b5c..5a73d09 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out
@@ -52,8 +52,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -269,8 +269,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -486,8 +486,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -703,8 +703,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_nested_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_nested_udf.q.out b/ql/src/test/results/clientpositive/spark/vectorization_nested_udf.q.out
index 7f81581..2871c1a 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_nested_udf.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_nested_udf.q.out
@@ -62,8 +62,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
index 53e8bbf..48165bb 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_part_project.q.out
@@ -83,8 +83,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
index 9001a2a..c1ad2f9 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
@@ -44,8 +44,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index 8dbd679..9879e22 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -134,8 +134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -394,8 +394,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -646,8 +646,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -877,8 +877,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1105,8 +1105,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1400,8 +1400,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1645,8 +1645,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1947,8 +1947,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2205,8 +2205,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2480,8 +2480,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2799,8 +2799,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3199,8 +3199,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3432,8 +3432,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3545,8 +3545,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3730,8 +3730,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3843,8 +3843,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3956,8 +3956,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4069,8 +4069,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4182,8 +4182,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4295,8 +4295,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
index b7232a8..7280567 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
@@ -85,8 +85,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -233,8 +233,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -322,8 +322,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -464,8 +464,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -580,7 +580,7 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE ((attr + 2)) END (type: decimal(11,0))
outputColumnNames: _col0
@@ -588,7 +588,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [6]
- selectExpressions: IfExprCondExprCondExpr(col 3:boolean, col 4:decimal(11,0)col 5:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, DecimalColAddDecimalScalar(col 1:decimal(10,0), val 1) -> 4:decimal(11,0), DecimalColAddDecimalScalar(col 1:decimal(10,0), val 2) -> 5:decimal(11,0)) -> 6:decimal(11,0)
+ selectExpressions: IfExprCondExprCondExpr(col 3:boolean, col 7:decimal(11,0)col 8:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, ConvertDecimal64ToDecimal(col 4:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 1, decimalVal 1) -> 4:decimal(11,0)/DECIMAL_64) -> 7:decimal(11,0), ConvertDecimal64ToDecimal(col 5:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 2, decimalVal 2) -> 5:decimal(11,0)/DECIMAL_64) -> 8:decimal(11,0)) -> 6:decimal(11,0)
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -604,8 +604,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -613,9 +613,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(11,0), decimal(11,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(11,0)/DECIMAL_64, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -660,15 +660,15 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN (1) ELSE ((attr + 2)) END (type: decimal(11,0))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [6]
- selectExpressions: IfExprColumnCondExpr(col 3:boolean, col 4:decimal(1,0)col 5:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, ConstantVectorExpression(val 1) -> 4:decimal(1,0), DecimalColAddDecimalScalar(col 1:decimal(10,0), val 2) -> 5:decimal(11,0)) -> 6:decimal(11,0)
+ projectedOutputColumnNums: [8]
+ selectExpressions: VectorUDFAdaptor(CASE WHEN ((member = 1)) THEN (1) ELSE ((attr + 2)) END)(children: VectorUDFAdaptor((member = 1)) -> 6:boolean, ConvertDecimal64ToDecimal(col 7:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 2, decimalVal 2) -> 7:decimal(11,0)/DECIMAL_64) -> 9:decimal(11,0)) -> 8:decimal(11,0)
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -684,8 +684,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -693,9 +693,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(1,0), decimal(11,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(1,0), decimal(11,0)/DECIMAL_64, bigint, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -740,15 +740,15 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (2) END (type: decimal(11,0))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [6]
- selectExpressions: IfExprCondExprColumn(col 3:boolean, col 4:decimal(11,0), col 5:decimal(1,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, DecimalColAddDecimalScalar(col 1:decimal(10,0), val 1) -> 4:decimal(11,0), ConstantVectorExpression(val 2) -> 5:decimal(1,0)) -> 6:decimal(11,0)
+ projectedOutputColumnNums: [8]
+ selectExpressions: VectorUDFAdaptor(CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (2) END)(children: VectorUDFAdaptor((member = 1)) -> 6:boolean, ConvertDecimal64ToDecimal(col 7:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 1, decimalVal 1) -> 7:decimal(11,0)/DECIMAL_64) -> 9:decimal(11,0)) -> 8:decimal(11,0)
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -764,8 +764,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -773,9 +773,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(11,0), decimal(1,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(11,0)/DECIMAL_64, decimal(1,0), bigint, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -862,8 +862,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -942,8 +942,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1022,8 +1022,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
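The vectorized_case.q.out changes above show what the new DECIMAL_64 support amounts to in the plans: decimal columns of small precision are annotated decimal(10,0)/DECIMAL_64, arithmetic is rewritten to Decimal64ColAddDecimal64Scalar, and ConvertDecimal64ToDecimal steps appear only where a consumer still needs the full decimal form. As a rough illustration (hypothetical Python, not Hive's Java implementation), a decimal with precision at most 18 can be carried as a 64-bit long scaled by 10^scale, so the vectorized add is a plain integer loop:

    # Hypothetical sketch of the DECIMAL_64 idea; names are illustrative only.
    from decimal import Decimal

    def decimal64_col_add_scalar(col, decimal64_val):
        # col and decimal64_val are already scaled longs with the same scale,
        # so the column-plus-scalar add stays in primitive integer arithmetic.
        return [v + decimal64_val for v in col]

    def convert_decimal64_to_decimal(scaled_long, scale):
        # Widen the scaled long back to an arbitrary-precision decimal,
        # analogous to the ConvertDecimal64ToDecimal steps in the plans above.
        return Decimal(scaled_long) / (10 ** scale)

    # decimal(10,0) column plus scalar 2, as in Decimal64ColAddDecimal64Scalar
    summed = decimal64_col_add_scalar([7, 8, 9], 2)
    print([convert_decimal64_to_decimal(v, 0) for v in summed])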
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out
index 4a80422..c17290a 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out
@@ -53,8 +53,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -137,8 +137,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out
index 4fbdd2e..024ff40 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_math_funcs.q.out
@@ -152,8 +152,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
index 3abce03..a035d0d 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
@@ -36,8 +36,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -64,8 +64,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -128,8 +128,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
index 278164b..121c112 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
@@ -165,8 +165,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -381,8 +381,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -423,8 +423,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -633,8 +633,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -789,8 +789,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1004,8 +1004,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1222,8 +1222,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1442,8 +1442,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1484,8 +1484,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1663,8 +1663,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1700,8 +1700,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2275,8 +2275,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2489,8 +2489,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2772,8 +2772,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2990,8 +2990,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3032,8 +3032,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3282,8 +3282,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3517,8 +3517,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3774,8 +3774,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4228,8 +4228,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4541,8 +4541,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4849,8 +4849,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5167,8 +5167,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5494,8 +5494,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5791,8 +5791,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
index 3cbcf2c..86ea785 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out
@@ -58,8 +58,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -99,8 +99,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
index 2a229d2..39e01ad 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
@@ -78,8 +78,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
[44/67] [abbrv] hive git commit: HIVE-19725: Add ability to dump
non-native tables in replication metadata dump (Mahesh Kumar Behera,
reviewed by Sankar Hariappan)
Posted by se...@apache.org.
HIVE-19725: Add ability to dump non-native tables in replication metadata dump (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6a16a71c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6a16a71c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6a16a71c
Branch: refs/heads/master-txnstats
Commit: 6a16a71ce99ff5d2f7bfa69cfcb475d4adc9873f
Parents: 4ec256c
Author: Sankar Hariappan <sa...@apache.org>
Authored: Mon Jun 18 06:23:41 2018 -0700
Committer: Sankar Hariappan <sa...@apache.org>
Committed: Mon Jun 18 06:23:41 2018 -0700
----------------------------------------------------------------------
.../hadoop/hive/ql/parse/TestExportImport.java | 44 +++++++++++++++++++-
...TestReplicationScenariosAcrossInstances.java | 29 ++++++++++++-
.../hadoop/hive/ql/parse/repl/dump/Utils.java | 3 +-
3 files changed, 73 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/6a16a71c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
index 67b74c2..53d13d8 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
@@ -30,9 +30,12 @@ import org.junit.Test;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.util.HashMap;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
public class TestExportImport {
@@ -122,4 +125,43 @@ public class TestExportImport {
.verifyResults(new String[] { "1", "2" });
}
+
+ @Test
+ public void testExportNonNativeTable() throws Throwable {
+ String path = "hdfs:///tmp/" + dbName + "/";
+ String exportPath = path + "1/";
+ String exportMetaPath = exportPath + "/Meta";
+ String tableName = testName.getMethodName();
+ String createTableQuery =
+ "CREATE TABLE " + tableName + " ( serde_id bigint COMMENT 'from deserializer', name string "
+ + "COMMENT 'from deserializer', slib string COMMENT 'from deserializer') "
+ + "ROW FORMAT SERDE 'org.apache.hive.storage.jdbc.JdbcSerDe' "
+ + "STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' "
+ + "WITH SERDEPROPERTIES ('serialization.format'='1') "
+ + "TBLPROPERTIES ( "
+ + "'hive.sql.database.type'='METASTORE', "
+ + "'hive.sql.query'='SELECT \"SERDE_ID\", \"NAME\", \"SLIB\" FROM \"SERDES\"')";
+
+ srcHiveWarehouse.run("use " + dbName)
+ .run(createTableQuery)
+ .runFailure("export table " + tableName + " to '" + exportPath + "'")
+ .run("export table " + tableName + " to '" + exportMetaPath + "'" + " for metadata replication('1')");
+
+ destHiveWarehouse.run("use " + replDbName)
+ .runFailure("import table " + tableName + " from '" + exportPath + "'")
+ .run("show tables")
+ .verifyFailure(new String[] {tableName})
+ .run("import table " + tableName + " from '" + exportMetaPath + "'")
+ .run("show tables")
+ .verifyResult(tableName);
+
+ // check physical path
+ Path checkPath = new Path(exportPath);
+ checkPath = new Path(checkPath, EximUtil.DATA_PATH_NAME);
+ FileSystem fs = checkPath.getFileSystem(srcHiveWarehouse.hiveConf);
+ assertFalse(fs.exists(checkPath));
+ checkPath = new Path(exportMetaPath);
+ checkPath = new Path(checkPath, EximUtil.METADATA_NAME);
+ assertTrue(fs.exists(checkPath));
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/6a16a71c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 26e308c..0f67174 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -797,7 +797,7 @@ public class TestReplicationScenariosAcrossInstances {
}
@Test
- public void shouldNotCreateDirectoryForNonNativeTableInDumpDirectory() throws Throwable {
+ public void testShouldNotCreateDirectoryForNonNativeTableInDumpDirectory() throws Throwable {
String createTableQuery =
"CREATE TABLE custom_serdes( serde_id bigint COMMENT 'from deserializer', name string "
+ "COMMENT 'from deserializer', slib string COMMENT 'from deserializer') "
@@ -835,6 +835,33 @@ public class TestReplicationScenariosAcrossInstances {
}
}
+ @Test
+ public void testShouldDumpMetaDataForNonNativeTableIfSetMeataDataOnly() throws Throwable {
+ String tableName = testName.getMethodName() + "_table";
+ String createTableQuery =
+ "CREATE TABLE " + tableName + " ( serde_id bigint COMMENT 'from deserializer', name string "
+ + "COMMENT 'from deserializer', slib string COMMENT 'from deserializer') "
+ + "ROW FORMAT SERDE 'org.apache.hive.storage.jdbc.JdbcSerDe' "
+ + "STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' "
+ + "WITH SERDEPROPERTIES ('serialization.format'='1') "
+ + "TBLPROPERTIES ( "
+ + "'hive.sql.database.type'='METASTORE', "
+ + "'hive.sql.query'='SELECT \"SERDE_ID\", \"NAME\", \"SLIB\" FROM \"SERDES\"')";
+
+ WarehouseInstance.Tuple bootstrapTuple = primary
+ .run("use " + primaryDbName)
+ .run(createTableQuery)
+ .dump(primaryDbName, null, Collections.singletonList("'hive.repl.dump.metadata.only'='true'"));
+
+ // Bootstrap load in replica
+ replica.load(replicatedDbName, bootstrapTuple.dumpLocation)
+ .status(replicatedDbName)
+ .verifyResult(bootstrapTuple.lastReplicationId)
+ .run("use " + replicatedDbName)
+ .run("show tables")
+ .verifyResult(tableName);
+ }
+
private void verifyIfCkptSet(Map<String, String> props, String dumpDir) {
assertTrue(props.containsKey(ReplUtils.REPL_CHECKPOINT_KEY));
assertTrue(props.get(ReplUtils.REPL_CHECKPOINT_KEY).equals(dumpDir));
http://git-wip-us.apache.org/repos/asf/hive/blob/6a16a71c/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
index 14572ad..e356607 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
@@ -172,7 +172,8 @@ public class Utils {
return false;
}
- if (tableHandle.isNonNative()) {
+ // if its metadata only, then dump metadata of non native tables also.
+ if (tableHandle.isNonNative() && !replicationSpec.isMetadataOnly()) {
return false;
}
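The Utils.java hunk above is the functional core of this patch: previously any non-native (storage-handler-backed) table was unconditionally excluded from a dump, and now it is excluded only when the dump is not metadata-only. A minimal sketch of that decision, written here in Python for brevity (the real code is the Java shown above):

    # Sketch of the dump decision introduced above.
    def should_dump_non_native(is_non_native, is_metadata_only):
        if is_non_native and not is_metadata_only:
            return False
        return True

    assert should_dump_non_native(True, False) is False  # plain dump still skips it
    assert should_dump_non_native(True, True) is True    # metadata-only dump now includes it

The TestExportImport case added above checks the same thing at the file level: a plain export of the JdbcStorageHandler table fails, while the metadata-replication export succeeds and writes only the metadata file, with no data directory under the export path.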
[53/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
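The regenerated Python Thrift bindings below add transactional-statistics fields (txnId, validWriteIdList, and isStatsCompliant, an IsolationLevelCompliance value) to the metastore structs: Table, Partition, PartitionSpec, ColumnStatistics, AggrStats, and the stats request/result types. Purely as an illustration of the new keyword arguments (field names follow the updated thrift_spec entries below; the concrete values and the ColumnStatisticsDesc arguments are placeholders, not part of the patch):

    from hive_metastore.ttypes import (ColumnStatistics, ColumnStatisticsDesc,
                                       IsolationLevelCompliance)

    stats = ColumnStatistics(
        statsDesc=ColumnStatisticsDesc(isTblLevel=True, dbName="default", tableName="t"),
        statsObj=[],
        txnId=17,                                      # new field 3, defaults to -1
        validWriteIdList="default.t:17:17::",          # new field 4, opaque write-id list string
        isStatsCompliant=IsolationLevelCompliance.YES  # new field 5
    )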
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 11affe3..ccca4e9 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -305,6 +305,23 @@ class SchemaVersionState:
"DELETED": 8,
}
+class IsolationLevelCompliance:
+ YES = 1
+ NO = 2
+ UNKNOWN = 3
+
+ _VALUES_TO_NAMES = {
+ 1: "YES",
+ 2: "NO",
+ 3: "UNKNOWN",
+ }
+
+ _NAMES_TO_VALUES = {
+ "YES": 1,
+ "NO": 2,
+ "UNKNOWN": 3,
+ }
+
class FunctionType:
JAVA = 1
@@ -4550,6 +4567,9 @@ class Table:
- creationMetadata
- catName
- ownerType
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
@@ -4572,9 +4592,12 @@ class Table:
(16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16
(17, TType.STRING, 'catName', None, None, ), # 17
(18, TType.I32, 'ownerType', None, 1, ), # 18
+ (19, TType.I64, 'txnId', None, -1, ), # 19
+ (20, TType.STRING, 'validWriteIdList', None, None, ), # 20
+ (21, TType.I32, 'isStatsCompliant', None, None, ), # 21
)
- def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4],):
+ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None, ownerType=thrift_spec[18][4], txnId=thrift_spec[19][4], validWriteIdList=None, isStatsCompliant=None,):
self.tableName = tableName
self.dbName = dbName
self.owner = owner
@@ -4593,6 +4616,9 @@ class Table:
self.creationMetadata = creationMetadata
self.catName = catName
self.ownerType = ownerType
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -4708,6 +4734,21 @@ class Table:
self.ownerType = iprot.readI32()
else:
iprot.skip(ftype)
+ elif fid == 19:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 20:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 21:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -4797,6 +4838,18 @@ class Table:
oprot.writeFieldBegin('ownerType', TType.I32, 18)
oprot.writeI32(self.ownerType)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 19)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 20)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 21)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -4824,6 +4877,9 @@ class Table:
value = (value * 31) ^ hash(self.creationMetadata)
value = (value * 31) ^ hash(self.catName)
value = (value * 31) ^ hash(self.ownerType)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -4849,6 +4905,9 @@ class Partition:
- parameters
- privileges
- catName
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
@@ -4862,9 +4921,12 @@ class Partition:
(7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 7
(8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8
(9, TType.STRING, 'catName', None, None, ), # 9
+ (10, TType.I64, 'txnId', None, -1, ), # 10
+ (11, TType.STRING, 'validWriteIdList', None, None, ), # 11
+ (12, TType.I32, 'isStatsCompliant', None, None, ), # 12
)
- def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None,):
+ def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None, txnId=thrift_spec[10][4], validWriteIdList=None, isStatsCompliant=None,):
self.values = values
self.dbName = dbName
self.tableName = tableName
@@ -4874,6 +4936,9 @@ class Partition:
self.parameters = parameters
self.privileges = privileges
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -4942,6 +5007,21 @@ class Partition:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 10:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 11:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 12:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -4995,6 +5075,18 @@ class Partition:
oprot.writeFieldBegin('catName', TType.STRING, 9)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 10)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 11)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 12)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -5013,6 +5105,9 @@ class Partition:
value = (value * 31) ^ hash(self.parameters)
value = (value * 31) ^ hash(self.privileges)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -5346,6 +5441,9 @@ class PartitionSpec:
- sharedSDPartitionSpec
- partitionList
- catName
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
@@ -5356,15 +5454,21 @@ class PartitionSpec:
(4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4
(5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5
(6, TType.STRING, 'catName', None, None, ), # 6
+ (7, TType.I64, 'txnId', None, -1, ), # 7
+ (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
+ (9, TType.I32, 'isStatsCompliant', None, None, ), # 9
)
- def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None,):
+ def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None, txnId=thrift_spec[7][4], validWriteIdList=None, isStatsCompliant=None,):
self.dbName = dbName
self.tableName = tableName
self.rootPath = rootPath
self.sharedSDPartitionSpec = sharedSDPartitionSpec
self.partitionList = partitionList
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -5407,6 +5511,21 @@ class PartitionSpec:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 7:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 9:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -5441,6 +5560,18 @@ class PartitionSpec:
oprot.writeFieldBegin('catName', TType.STRING, 6)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 7)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 9)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -5456,6 +5587,9 @@ class PartitionSpec:
value = (value * 31) ^ hash(self.sharedSDPartitionSpec)
value = (value * 31) ^ hash(self.partitionList)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -6841,17 +6975,26 @@ class ColumnStatistics:
Attributes:
- statsDesc
- statsObj
+ - txnId
+ - validWriteIdList
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'statsDesc', (ColumnStatisticsDesc, ColumnStatisticsDesc.thrift_spec), None, ), # 1
(2, TType.LIST, 'statsObj', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 2
+ (3, TType.I64, 'txnId', None, -1, ), # 3
+ (4, TType.STRING, 'validWriteIdList', None, None, ), # 4
+ (5, TType.I32, 'isStatsCompliant', None, None, ), # 5
)
- def __init__(self, statsDesc=None, statsObj=None,):
+ def __init__(self, statsDesc=None, statsObj=None, txnId=thrift_spec[3][4], validWriteIdList=None, isStatsCompliant=None,):
self.statsDesc = statsDesc
self.statsObj = statsObj
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6879,6 +7022,21 @@ class ColumnStatistics:
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6900,6 +7058,18 @@ class ColumnStatistics:
iter243.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 3)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 5)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -6915,6 +7085,9 @@ class ColumnStatistics:
value = 17
value = (value * 31) ^ hash(self.statsDesc)
value = (value * 31) ^ hash(self.statsObj)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -6933,17 +7106,20 @@ class AggrStats:
Attributes:
- colStats
- partsFound
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1
(2, TType.I64, 'partsFound', None, None, ), # 2
+ (3, TType.I32, 'isStatsCompliant', None, None, ), # 3
)
- def __init__(self, colStats=None, partsFound=None,):
+ def __init__(self, colStats=None, partsFound=None, isStatsCompliant=None,):
self.colStats = colStats
self.partsFound = partsFound
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6970,6 +7146,11 @@ class AggrStats:
self.partsFound = iprot.readI64()
else:
iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6991,6 +7172,10 @@ class AggrStats:
oprot.writeFieldBegin('partsFound', TType.I64, 2)
oprot.writeI64(self.partsFound)
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 3)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -7006,6 +7191,7 @@ class AggrStats:
value = 17
value = (value * 31) ^ hash(self.colStats)
value = (value * 31) ^ hash(self.partsFound)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -7024,17 +7210,23 @@ class SetPartitionsStatsRequest:
Attributes:
- colStats
- needMerge
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1
(2, TType.BOOL, 'needMerge', None, None, ), # 2
+ (3, TType.I64, 'txnId', None, -1, ), # 3
+ (4, TType.STRING, 'validWriteIdList', None, None, ), # 4
)
- def __init__(self, colStats=None, needMerge=None,):
+ def __init__(self, colStats=None, needMerge=None, txnId=thrift_spec[3][4], validWriteIdList=None,):
self.colStats = colStats
self.needMerge = needMerge
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -7061,6 +7253,16 @@ class SetPartitionsStatsRequest:
self.needMerge = iprot.readBool()
else:
iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -7082,6 +7284,14 @@ class SetPartitionsStatsRequest:
oprot.writeFieldBegin('needMerge', TType.BOOL, 2)
oprot.writeBool(self.needMerge)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 3)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -7095,6 +7305,8 @@ class SetPartitionsStatsRequest:
value = 17
value = (value * 31) ^ hash(self.colStats)
value = (value * 31) ^ hash(self.needMerge)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -9133,15 +9345,18 @@ class TableStatsResult:
"""
Attributes:
- tableStats
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'tableStats', (TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec)), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, tableStats=None,):
+ def __init__(self, tableStats=None, isStatsCompliant=None,):
self.tableStats = tableStats
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9163,6 +9378,11 @@ class TableStatsResult:
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9180,6 +9400,10 @@ class TableStatsResult:
iter380.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9192,6 +9416,7 @@ class TableStatsResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.tableStats)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -9209,15 +9434,18 @@ class PartitionsStatsResult:
"""
Attributes:
- partStats
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'partStats', (TType.STRING,None,TType.LIST,(TType.STRUCT,(ColumnStatisticsObj, ColumnStatisticsObj.thrift_spec))), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, partStats=None,):
+ def __init__(self, partStats=None, isStatsCompliant=None,):
self.partStats = partStats
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9245,6 +9473,11 @@ class PartitionsStatsResult:
iprot.readMapEnd()
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9266,6 +9499,10 @@ class PartitionsStatsResult:
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9278,6 +9515,7 @@ class PartitionsStatsResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.partStats)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -9298,6 +9536,8 @@ class TableStatsRequest:
- tblName
- colNames
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -9306,13 +9546,17 @@ class TableStatsRequest:
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
(4, TType.STRING, 'catName', None, None, ), # 4
+ (5, TType.I64, 'txnId', None, -1, ), # 5
+ (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
)
- def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,):
+ def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9348,6 +9592,16 @@ class TableStatsRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9377,6 +9631,14 @@ class TableStatsRequest:
oprot.writeFieldBegin('catName', TType.STRING, 4)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 5)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9396,6 +9658,8 @@ class TableStatsRequest:
value = (value * 31) ^ hash(self.tblName)
value = (value * 31) ^ hash(self.colNames)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -9417,6 +9681,8 @@ class PartitionsStatsRequest:
- colNames
- partNames
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -9426,14 +9692,18 @@ class PartitionsStatsRequest:
(3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
(4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4
(5, TType.STRING, 'catName', None, None, ), # 5
+ (6, TType.I64, 'txnId', None, -1, ), # 6
+ (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
)
- def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None,):
+ def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, txnId=thrift_spec[6][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.colNames = colNames
self.partNames = partNames
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9479,6 +9749,16 @@ class PartitionsStatsRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 7:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9515,6 +9795,14 @@ class PartitionsStatsRequest:
oprot.writeFieldBegin('catName', TType.STRING, 5)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 6)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9537,6 +9825,8 @@ class PartitionsStatsRequest:
value = (value * 31) ^ hash(self.colNames)
value = (value * 31) ^ hash(self.partNames)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -9554,15 +9844,18 @@ class AddPartitionsResult:
"""
Attributes:
- partitions
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, partitions=None,):
+ def __init__(self, partitions=None, isStatsCompliant=None,):
self.partitions = partitions
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9584,6 +9877,11 @@ class AddPartitionsResult:
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9601,6 +9899,10 @@ class AddPartitionsResult:
iter424.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9611,6 +9913,7 @@ class AddPartitionsResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.partitions)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -9633,6 +9936,8 @@ class AddPartitionsRequest:
- ifNotExists
- needResult
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -9643,15 +9948,19 @@ class AddPartitionsRequest:
(4, TType.BOOL, 'ifNotExists', None, None, ), # 4
(5, TType.BOOL, 'needResult', None, True, ), # 5
(6, TType.STRING, 'catName', None, None, ), # 6
+ (7, TType.I64, 'txnId', None, -1, ), # 7
+ (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
)
- def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None,):
+ def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None, txnId=thrift_spec[7][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.parts = parts
self.ifNotExists = ifNotExists
self.needResult = needResult
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -9698,6 +10007,16 @@ class AddPartitionsRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 7:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -9735,6 +10054,14 @@ class AddPartitionsRequest:
oprot.writeFieldBegin('catName', TType.STRING, 6)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 7)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -9758,6 +10085,8 @@ class AddPartitionsRequest:
value = (value * 31) ^ hash(self.ifNotExists)
value = (value * 31) ^ hash(self.needResult)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -16609,6 +16938,8 @@ class GetTableRequest:
- tblName
- capabilities
- catName
+ - txnId
+ - validWriteIdList
"""
thrift_spec = (
@@ -16617,13 +16948,17 @@ class GetTableRequest:
(2, TType.STRING, 'tblName', None, None, ), # 2
(3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3
(4, TType.STRING, 'catName', None, None, ), # 4
+ (5, TType.I64, 'txnId', None, -1, ), # 5
+ (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
)
- def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None,):
+ def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,):
self.dbName = dbName
self.tblName = tblName
self.capabilities = capabilities
self.catName = catName
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -16655,6 +16990,16 @@ class GetTableRequest:
self.catName = iprot.readString()
else:
iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -16681,6 +17026,14 @@ class GetTableRequest:
oprot.writeFieldBegin('catName', TType.STRING, 4)
oprot.writeString(self.catName)
oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 5)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -16698,6 +17051,8 @@ class GetTableRequest:
value = (value * 31) ^ hash(self.tblName)
value = (value * 31) ^ hash(self.capabilities)
value = (value * 31) ^ hash(self.catName)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
return value
def __repr__(self):
@@ -16715,15 +17070,18 @@ class GetTableResult:
"""
Attributes:
- table
+ - isStatsCompliant
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 1
+ (2, TType.I32, 'isStatsCompliant', None, None, ), # 2
)
- def __init__(self, table=None,):
+ def __init__(self, table=None, isStatsCompliant=None,):
self.table = table
+ self.isStatsCompliant = isStatsCompliant
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -16740,6 +17098,11 @@ class GetTableResult:
self.table.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.isStatsCompliant = iprot.readI32()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -16754,6 +17117,10 @@ class GetTableResult:
oprot.writeFieldBegin('table', TType.STRUCT, 1)
self.table.write(oprot)
oprot.writeFieldEnd()
+ if self.isStatsCompliant is not None:
+ oprot.writeFieldBegin('isStatsCompliant', TType.I32, 2)
+ oprot.writeI32(self.isStatsCompliant)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -16766,6 +17133,7 @@ class GetTableResult:
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.table)
+ value = (value * 31) ^ hash(self.isStatsCompliant)
return value
def __repr__(self):
@@ -21666,6 +22034,200 @@ class GetRuntimeStatsRequest:
def __ne__(self, other):
return not (self == other)
+class AlterPartitionsRequest:
+ """
+ Attributes:
+ - dbName
+ - tableName
+ - partitions
+ - environmentContext
+ - txnId
+ - validWriteIdList
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'dbName', None, None, ), # 1
+ (2, TType.STRING, 'tableName', None, None, ), # 2
+ (3, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3
+ (4, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 4
+ (5, TType.I64, 'txnId', None, -1, ), # 5
+ (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
+ )
+
+ def __init__(self, dbName=None, tableName=None, partitions=None, environmentContext=None, txnId=thrift_spec[5][4], validWriteIdList=None,):
+ self.dbName = dbName
+ self.tableName = tableName
+ self.partitions = partitions
+ self.environmentContext = environmentContext
+ self.txnId = txnId
+ self.validWriteIdList = validWriteIdList
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.dbName = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.tableName = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.LIST:
+ self.partitions = []
+ (_etype819, _size816) = iprot.readListBegin()
+ for _i820 in xrange(_size816):
+ _elem821 = Partition()
+ _elem821.read(iprot)
+ self.partitions.append(_elem821)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.environmentContext = EnvironmentContext()
+ self.environmentContext.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.txnId = iprot.readI64()
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.STRING:
+ self.validWriteIdList = iprot.readString()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('AlterPartitionsRequest')
+ if self.dbName is not None:
+ oprot.writeFieldBegin('dbName', TType.STRING, 1)
+ oprot.writeString(self.dbName)
+ oprot.writeFieldEnd()
+ if self.tableName is not None:
+ oprot.writeFieldBegin('tableName', TType.STRING, 2)
+ oprot.writeString(self.tableName)
+ oprot.writeFieldEnd()
+ if self.partitions is not None:
+ oprot.writeFieldBegin('partitions', TType.LIST, 3)
+ oprot.writeListBegin(TType.STRUCT, len(self.partitions))
+ for iter822 in self.partitions:
+ iter822.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.environmentContext is not None:
+ oprot.writeFieldBegin('environmentContext', TType.STRUCT, 4)
+ self.environmentContext.write(oprot)
+ oprot.writeFieldEnd()
+ if self.txnId is not None:
+ oprot.writeFieldBegin('txnId', TType.I64, 5)
+ oprot.writeI64(self.txnId)
+ oprot.writeFieldEnd()
+ if self.validWriteIdList is not None:
+ oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
+ oprot.writeString(self.validWriteIdList)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ if self.dbName is None:
+ raise TProtocol.TProtocolException(message='Required field dbName is unset!')
+ if self.tableName is None:
+ raise TProtocol.TProtocolException(message='Required field tableName is unset!')
+ if self.partitions is None:
+ raise TProtocol.TProtocolException(message='Required field partitions is unset!')
+ if self.environmentContext is None:
+ raise TProtocol.TProtocolException(message='Required field environmentContext is unset!')
+ return
+
+
+ def __hash__(self):
+ value = 17
+ value = (value * 31) ^ hash(self.dbName)
+ value = (value * 31) ^ hash(self.tableName)
+ value = (value * 31) ^ hash(self.partitions)
+ value = (value * 31) ^ hash(self.environmentContext)
+ value = (value * 31) ^ hash(self.txnId)
+ value = (value * 31) ^ hash(self.validWriteIdList)
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class AlterPartitionsResponse:
+
+ thrift_spec = (
+ )
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('AlterPartitionsResponse')
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __hash__(self):
+ value = 17
+ return value
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
class MetaException(TException):
"""
Attributes:
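
For reference only (not part of the generated diff above): a minimal Python sketch of how a metastore client could populate the new optional transactional fields on these generated structs. The host/port, table and column names, txnId value and the serialized ValidWriteIdList string are placeholders, and the hive_metastore package path is assumed to match the gen-py output this patch regenerates.

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import TableStatsRequest

# Connect to a metastore instance (9083 is the default thrift port).
transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()

# txnId defaults to -1 and validWriteIdList to None (see thrift_spec above),
# so non-transactional callers are unchanged; transactional callers set both.
req = TableStatsRequest(dbName='default',
                        tblName='acid_tbl',                        # placeholder
                        colNames=['id', 'value'],                  # placeholder
                        txnId=1234,                                # placeholder
                        validWriteIdList='<serialized ValidWriteIdList>')
result = client.get_table_statistics_req(req)
# TableStatsResult now optionally reports an IsolationLevelCompliance value.
print(result.isStatsCompliant)

transport.close()

Because every new field is optional, the Ruby and PHP bindings below pick up the same additions without affecting existing callers that omit them.
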
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index fc640d0..7b5132c 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -141,6 +141,14 @@ module SchemaVersionState
VALID_VALUES = Set.new([INITIATED, START_REVIEW, CHANGES_REQUIRED, REVIEWED, ENABLED, DISABLED, ARCHIVED, DELETED]).freeze
end
+module IsolationLevelCompliance
+ YES = 1
+ NO = 2
+ UNKNOWN = 3
+ VALUE_MAP = {1 => "YES", 2 => "NO", 3 => "UNKNOWN"}
+ VALID_VALUES = Set.new([YES, NO, UNKNOWN]).freeze
+end
+
module FunctionType
JAVA = 1
VALUE_MAP = {1 => "JAVA"}
@@ -1062,6 +1070,9 @@ class Table
CREATIONMETADATA = 16
CATNAME = 17
OWNERTYPE = 18
+ TXNID = 19
+ VALIDWRITEIDLIST = 20
+ ISSTATSCOMPLIANT = 21
FIELDS = {
TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
@@ -1081,7 +1092,10 @@ class Table
REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true},
CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true},
CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
- OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType}
+ OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default => 1, :optional => true, :enum_class => ::PrincipalType},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
@@ -1090,6 +1104,9 @@ class Table
unless @ownerType.nil? || ::PrincipalType::VALID_VALUES.include?(@ownerType)
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field ownerType!')
end
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1106,6 +1123,9 @@ class Partition
PARAMETERS = 7
PRIVILEGES = 8
CATNAME = 9
+ TXNID = 10
+ VALIDWRITEIDLIST = 11
+ ISSTATSCOMPLIANT = 12
FIELDS = {
VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}},
@@ -1116,12 +1136,18 @@ class Partition
SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor},
PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1195,6 +1221,9 @@ class PartitionSpec
SHAREDSDPARTITIONSPEC = 4
PARTITIONLIST = 5
CATNAME = 6
+ TXNID = 7
+ VALIDWRITEIDLIST = 8
+ ISSTATSCOMPLIANT = 9
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -1202,12 +1231,18 @@ class PartitionSpec
ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'},
SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true},
PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1547,10 +1582,16 @@ class ColumnStatistics
include ::Thrift::Struct, ::Thrift::Struct_Union
STATSDESC = 1
STATSOBJ = 2
+ TXNID = 3
+ VALIDWRITEIDLIST = 4
+ ISSTATSCOMPLIANT = 5
FIELDS = {
STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc},
- STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
+ STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
@@ -1558,6 +1599,9 @@ class ColumnStatistics
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsDesc is unset!') unless @statsDesc
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsObj is unset!') unless @statsObj
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1567,10 +1611,12 @@ class AggrStats
include ::Thrift::Struct, ::Thrift::Struct_Union
COLSTATS = 1
PARTSFOUND = 2
+ ISSTATSCOMPLIANT = 3
FIELDS = {
COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
- PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'}
+ PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
@@ -1578,6 +1624,9 @@ class AggrStats
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partsFound is unset!') unless @partsFound
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -1587,10 +1636,14 @@ class SetPartitionsStatsRequest
include ::Thrift::Struct, ::Thrift::Struct_Union
COLSTATS = 1
NEEDMERGE = 2
+ TXNID = 3
+ VALIDWRITEIDLIST = 4
FIELDS = {
COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}},
- NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}
+ NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -2055,15 +2108,20 @@ end
class TableStatsResult
include ::Thrift::Struct, ::Thrift::Struct_Union
TABLESTATS = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
+ TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableStats is unset!') unless @tableStats
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -2072,15 +2130,20 @@ end
class PartitionsStatsResult
include ::Thrift::Struct, ::Thrift::Struct_Union
PARTSTATS = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}}
+ PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partStats is unset!') unless @partStats
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -2092,12 +2155,16 @@ class TableStatsRequest
TBLNAME = 2
COLNAMES = 3
CATNAME = 4
+ TXNID = 5
+ VALIDWRITEIDLIST = 6
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -2118,13 +2185,17 @@ class PartitionsStatsRequest
COLNAMES = 3
PARTNAMES = 4
CATNAME = 5
+ TXNID = 6
+ VALIDWRITEIDLIST = 7
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -2142,14 +2213,19 @@ end
class AddPartitionsResult
include ::Thrift::Struct, ::Thrift::Struct_Union
PARTITIONS = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true}
+ PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -2163,6 +2239,8 @@ class AddPartitionsRequest
IFNOTEXISTS = 4
NEEDRESULT = 5
CATNAME = 6
+ TXNID = 7
+ VALIDWRITEIDLIST = 8
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -2170,7 +2248,9 @@ class AddPartitionsRequest
PARTS => {:type => ::Thrift::Types::LIST, :name => 'parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'},
NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -3731,12 +3811,16 @@ class GetTableRequest
TBLNAME = 2
CAPABILITIES = 3
CATNAME = 4
+ TXNID = 5
+ VALIDWRITEIDLIST = 6
FIELDS = {
DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true},
- CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+ CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
}
def struct_fields; FIELDS; end
@@ -3752,15 +3836,20 @@ end
class GetTableResult
include ::Thrift::Struct, ::Thrift::Struct_Union
TABLE = 1
+ ISSTATSCOMPLIANT = 2
FIELDS = {
- TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table}
+ TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table},
+ ISSTATSCOMPLIANT => {:type => ::Thrift::Types::I32, :name => 'isStatsCompliant', :optional => true, :enum_class => ::IsolationLevelCompliance}
}
def struct_fields; FIELDS; end
def validate
raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field table is unset!') unless @table
+ unless @isStatsCompliant.nil? || ::IsolationLevelCompliance::VALID_VALUES.include?(@isStatsCompliant)
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field isStatsCompliant!')
+ end
end
::Thrift::Struct.generate_accessors self
@@ -4923,6 +5012,51 @@ class GetRuntimeStatsRequest
::Thrift::Struct.generate_accessors self
end
+class AlterPartitionsRequest
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+ DBNAME = 1
+ TABLENAME = 2
+ PARTITIONS = 3
+ ENVIRONMENTCONTEXT = 4
+ TXNID = 5
+ VALIDWRITEIDLIST = 6
+
+ FIELDS = {
+ DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+ TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+ PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
+ ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext},
+ TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+ VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitions is unset!') unless @partitions
+ raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field environmentContext is unset!') unless @environmentContext
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
+class AlterPartitionsResponse
+ include ::Thrift::Struct, ::Thrift::Struct_Union
+
+ FIELDS = {
+
+ }
+
+ def struct_fields; FIELDS; end
+
+ def validate
+ end
+
+ ::Thrift::Struct.generate_accessors self
+end
+
class MetaException < ::Thrift::Exception
include ::Thrift::Struct, ::Thrift::Struct_Union
def initialize(message=nil)
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index bbf3f12..3987ee9 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -1416,20 +1416,21 @@ module ThriftHiveMetastore
return
end
- def alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
- send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
- recv_alter_partitions_with_environment_context()
+ def alter_partitions_with_environment_context(req)
+ send_alter_partitions_with_environment_context(req)
+ return recv_alter_partitions_with_environment_context()
end
- def send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
- send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts, :environment_context => environment_context)
+ def send_alter_partitions_with_environment_context(req)
+ send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :req => req)
end
def recv_alter_partitions_with_environment_context()
result = receive_message(Alter_partitions_with_environment_context_result)
+ return result.success unless result.success.nil?
raise result.o1 unless result.o1.nil?
raise result.o2 unless result.o2.nil?
- return
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_partitions_with_environment_context failed: unknown result')
end
def alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
@@ -4580,7 +4581,7 @@ module ThriftHiveMetastore
args = read_args(iprot, Alter_partitions_with_environment_context_args)
result = Alter_partitions_with_environment_context_result.new()
begin
- @handler.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context)
+ result.success = @handler.alter_partitions_with_environment_context(args.req)
rescue ::InvalidOperationException => o1
result.o1 = o1
rescue ::MetaException => o2
@@ -9272,16 +9273,10 @@ module ThriftHiveMetastore
class Alter_partitions_with_environment_context_args
include ::Thrift::Struct, ::Thrift::Struct_Union
- DB_NAME = 1
- TBL_NAME = 2
- NEW_PARTS = 3
- ENVIRONMENT_CONTEXT = 4
+ REQ = 1
FIELDS = {
- DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
- TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
- NEW_PARTS => {:type => ::Thrift::Types::LIST, :name => 'new_parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
- ENVIRONMENT_CONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environment_context', :class => ::EnvironmentContext}
+ REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::AlterPartitionsRequest}
}
def struct_fields; FIELDS; end
@@ -9294,10 +9289,12 @@ module ThriftHiveMetastore
class Alter_partitions_with_environment_context_result
include ::Thrift::Struct, ::Thrift::Struct_Union
+ SUCCESS = 0
O1 = 1
O2 = 2
FIELDS = {
+ SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::AlterPartitionsResponse},
O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException},
O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
}
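
Viewed from a client, the old four-argument alter_partitions_with_environment_context call collapses into a single request object that also carries the writer's transactional context and now returns a response struct. A hedged Python sketch follows; it reuses the connected client from the earlier sketch, and it assumes the regenerated Python stub exposes the same one-argument signature as the Ruby stub above.

from hive_metastore.ttypes import AlterPartitionsRequest, EnvironmentContext

# Fetch the partitions to modify, then mutate them as needed (placeholder names).
changed_parts = client.get_partitions('default', 'acid_tbl', -1)

# Previously: alter_partitions_with_environment_context(db, tbl, parts, env_ctx).
# Now txnId / validWriteIdList travel inside the request struct.
req = AlterPartitionsRequest(dbName='default',
                             tableName='acid_tbl',                 # placeholder
                             partitions=changed_parts,
                             environmentContext=EnvironmentContext(),
                             txnId=1234,                           # placeholder
                             validWriteIdList='<serialized ValidWriteIdList>')
resp = client.alter_partitions_with_environment_context(req)      # AlterPartitionsResponse
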
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 050dca9..010870d 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -197,6 +197,6 @@ public interface AlterHandler extends Configurable {
*/
List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
final String dbname, final String name, final List<Partition> new_parts,
- EnvironmentContext environmentContext,IHMSHandler handler)
+ EnvironmentContext environmentContext, long txnId, String writeIdList, IHMSHandler handler)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index c2da6d3..5b70307 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -144,7 +144,7 @@ public class HiveAlterHandler implements AlterHandler {
// check if table with the new name already exists
if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
- if (msdb.getTable(catName, newDbName, newTblName) != null) {
+ if (msdb.getTable(catName, newDbName, newTblName, -1, null) != null) {
throw new InvalidOperationException("new table " + newDbName
+ "." + newTblName + " already exists");
}
@@ -153,7 +153,7 @@ public class HiveAlterHandler implements AlterHandler {
msdb.openTransaction();
// get old table
- oldt = msdb.getTable(catName, dbname, name);
+ oldt = msdb.getTable(catName, dbname, name, -1, null);
if (oldt == null) {
throw new InvalidOperationException("table " +
TableName.getQualified(catName, dbname, name) + " doesn't exist");
@@ -296,7 +296,8 @@ public class HiveAlterHandler implements AlterHandler {
for (Partition part : partBatch) {
partValues.add(part.getValues());
}
- msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch);
+ msdb.alterPartitions(
+ catName, newDbName, newTblName, partValues, partBatch, -1, null);
}
}
@@ -453,7 +454,7 @@ public class HiveAlterHandler implements AlterHandler {
try {
msdb.openTransaction();
- Table tbl = msdb.getTable(catName, dbname, name);
+ Table tbl = msdb.getTable(catName, dbname, name, -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to alter partition because table or database does not exist.");
@@ -509,7 +510,7 @@ public class HiveAlterHandler implements AlterHandler {
Database db;
try {
msdb.openTransaction();
- Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name);
+ Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to alter partition because table or database does not exist.");
@@ -658,14 +659,15 @@ public class HiveAlterHandler implements AlterHandler {
EnvironmentContext environmentContext)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts,
- environmentContext, null);
+ environmentContext, -1, null, null);
}
@Override
public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
final String dbname, final String name,
final List<Partition> new_parts,
- EnvironmentContext environmentContext, IHMSHandler handler)
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList, IHMSHandler handler)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
List<Partition> oldParts = new ArrayList<>();
List<List<String>> partValsList = new ArrayList<>();
@@ -678,7 +680,7 @@ public class HiveAlterHandler implements AlterHandler {
try {
msdb.openTransaction();
- Table tbl = msdb.getTable(catName, dbname, name);
+ Table tbl = msdb.getTable(catName, dbname, name, -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to alter partitions because table or database does not exist.");
@@ -713,7 +715,7 @@ public class HiveAlterHandler implements AlterHandler {
}
}
- msdb.alterPartitions(catName, dbname, name, partValsList, new_parts);
+ msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, txnId, writeIdList);
Iterator<Partition> oldPartsIt = oldParts.iterator();
for (Partition newPart : new_parts) {
Partition oldPart;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index e88f9a5..248c4b3 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2437,7 +2437,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
private boolean is_table_exists(RawStore ms, String catName, String dbname, String name)
throws MetaException {
- return (ms.getTable(catName, dbname, name) != null);
+ return (ms.getTable(catName, dbname, name, -1, null) != null);
}
private boolean drop_table_core(final RawStore ms, final String catName, final String dbname,
@@ -2599,7 +2599,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
tableDnsPath = wh.getDnsPath(tablePath);
}
List<Path> partPaths = new ArrayList<>();
- Table tbl = ms.getTable(catName, dbName, tableName);
+ Table tbl = ms.getTable(catName, dbName, tableName, -1, null);
// call dropPartition on each of the table's partitions to follow the
// procedure for cleanly dropping partitions.
@@ -2842,7 +2842,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
public Table get_table(final String dbname, final String name) throws MetaException,
NoSuchObjectException {
String[] parsedDbName = parseDbName(dbname, conf);
- return getTableInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null);
+ return getTableInternal(
+ parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, -1, null);
}
@Override
@@ -2850,11 +2851,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
NoSuchObjectException {
String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(),
- req.getCapabilities()));
+ req.getCapabilities(), req.getTxnId(), req.getValidWriteIdList()));
}
private Table getTableInternal(String catName, String dbname, String name,
- ClientCapabilities capabilities) throws MetaException, NoSuchObjectException {
+ ClientCapabilities capabilities, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
if (isInTest) {
assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY,
"Hive tests", "get_table_req");
@@ -2864,7 +2866,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
startTableFunction("get_table", catName, dbname, name);
Exception ex = null;
try {
- t = get_table_core(catName, dbname, name);
+ t = get_table_core(catName, dbname, name, txnId, writeIdList);
if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
"insert-only tables", "get_table_req");
@@ -2899,11 +2901,25 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
@Override
- public Table get_table_core(final String catName, final String dbname, final String name)
+ public Table get_table_core(
+ final String catName,
+ final String dbname,
+ final String name)
+ throws MetaException, NoSuchObjectException {
+ return get_table_core(catName, dbname, name, -1, null);
+ }
+
+ @Override
+ public Table get_table_core(
+ final String catName,
+ final String dbname,
+ final String name,
+ final long txnId,
+ final String writeIdList)
throws MetaException, NoSuchObjectException {
Table t = null;
try {
- t = getMS().getTable(catName, dbname, name);
+ t = getMS().getTable(catName, dbname, name, txnId, writeIdList);
if (t == null) {
throw new NoSuchObjectException(TableName.getQualified(catName, dbname, name) +
" table not found");
@@ -3085,7 +3101,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern);
- tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
+ tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to add partition because table or database do not exist");
@@ -3279,7 +3295,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
try {
ms.openTransaction();
- tbl = ms.getTable(catName, dbName, tblName);
+ tbl = ms.getTable(catName, dbName, tblName, -1, null);
if (tbl == null) {
throw new InvalidObjectException("Unable to add partitions because "
+ TableName.getQualified(catName, dbName, tblName) +
@@ -3550,7 +3566,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
Database db = null;
try {
ms.openTransaction();
- tbl = ms.getTable(catName, dbName, tblName);
+ tbl = ms.getTable(catName, dbName, tblName, -1, null);
if (tbl == null) {
throw new InvalidObjectException("Unable to add partitions because "
+ "database or table " + dbName + "." + tblName + " does not exist");
@@ -3804,7 +3820,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
try {
ms.openTransaction();
- tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName());
+ tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
if (tbl == null) {
throw new InvalidObjectException(
"Unable to add partition because table or database do not exist");
@@ -3925,14 +3941,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
ms.openTransaction();
Table destinationTable =
- ms.getTable(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName);
+ ms.getTable(
+ parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName, -1, null);
if (destinationTable == null) {
throw new MetaException( "The destination table " +
TableName.getQualified(parsedDestDbName[CAT_NAME],
parsedDestDbName[DB_NAME], destTableName) + " not found");
}
Table sourceTable =
- ms.getTable(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName);
+ ms.getTable(
+ parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName, -1, null);
if (sourceTable == null) {
throw new MetaException("The source table " +
TableName.getQualified(parsedSourceDbName[CAT_NAME],
@@ -4109,7 +4127,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
try {
ms.openTransaction();
part = ms.getPartition(catName, db_name, tbl_name, part_vals);
- tbl = get_table_core(catName, db_name, tbl_name);
+ tbl = get_table_core(catName, db_name, tbl_name, -1, null);
isExternalTbl = isExternal(tbl);
firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
mustPurge = isMustPurge(envContext, tbl);
@@ -4837,7 +4855,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
Table table = null;
if (!listeners.isEmpty()) {
if (table == null) {
- table = getMS().getTable(catName, db_name, tbl_name);
+ table = getMS().getTable(catName, db_name, tbl_name, -1, null);
}
MetaStoreListenerNotifier.notifyEvent(listeners,
@@ -4866,12 +4884,23 @@ public class HiveMetaStore extends ThriftHiveMetastore {
public void alter_partitions(final String db_name, final String tbl_name,
final List<Partition> new_parts)
throws TException {
- alter_partitions_with_environment_context(db_name, tbl_name, new_parts, null);
+ alter_partitions_with_environment_context(
+ db_name, tbl_name, new_parts, null, -1, null);
}
@Override
- public void alter_partitions_with_environment_context(final String db_name, final String tbl_name,
- final List<Partition> new_parts, EnvironmentContext environmentContext)
+ public AlterPartitionsResponse alter_partitions_with_environment_context(
+ AlterPartitionsRequest req)
+ throws TException {
+ alter_partitions_with_environment_context(
+ req.getDbName(), req.getTableName(), req.getPartitions(), req.getEnvironmentContext(),
+ req.getTxnId(), req.getValidWriteIdList());
+ return new AlterPartitionsResponse();
+ }
+
+ private void alter_partitions_with_environment_context(final String db_name, final String tbl_name,
+ final List<Partition> new_parts, EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
throws TException {
String[] parsedDbName = parseDbName(db_name, conf);
@@ -4895,7 +4924,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this));
}
oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME],
- parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, this);
+ parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, txnId, writeIdList, this);
Iterator<Partition> olditr = oldParts.iterator();
// Only fetch the table if we have a listener that needs it.
Table table = null;
@@ -4909,7 +4938,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
if (table == null) {
- table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+ table = getMS().getTable(
+ parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, -1, null);
}
if (!listeners.isEmpty()) {
@@ -5329,7 +5359,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
private List<String> getPartValsFromName(RawStore ms, String catName, String dbName,
String tblName, String partName)
throws MetaException, InvalidObjectException {
- Table t = ms.getTable(catName, dbName, tblName);
+ Table t = ms.getTable(catName, dbName, tblName, -1, null);
if (t == null) {
throw new InvalidObjectException(dbName + "." + tblName
+ " table not found");
@@ -5584,7 +5614,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
ColumnStatistics statsObj = null;
try {
statsObj = getMS().getTableColumnStatistics(
- parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName));
+ parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName),
+ -1, null);
if (statsObj != null) {
assert statsObj.getStatsObjSize() <= 1;
}
@@ -5608,7 +5639,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
lowerCaseColNames.add(colName.toLowerCase());
}
try {
- ColumnStatistics cs = getMS().getTableColumnStatistics(catName, dbName, tblName, lowerCaseColNames);
+ ColumnStatistics cs = getMS().getTableColumnStatistics(
+ catName, dbName, tblName, lowerCaseColNames,
+ request.getTxnId(), request.getValidWriteIdList());
result = new TableStatsResult((cs == null || cs.getStatsObj() == null)
? Lists.newArrayList() : cs.getStatsObj());
} finally {
@@ -7315,8 +7348,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
AggrStats aggrStats = null;
try {
- aggrStats = new AggrStats(getMS().get_aggr_stats_for(catName, dbName, tblName,
- lowerCasePartNames, lowerCaseColNames));
+ aggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName,
+ lowerCasePartNames, lowerCaseColNames, request.getTxnId(),
+ request.getValidWriteIdList());
return aggrStats;
} finally {
endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName());
@@ -7350,7 +7384,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
} else {
if (request.isSetNeedMerge() && request.isNeedMerge()) {
// one single call to get all column stats
- ColumnStatistics csOld = getMS().getTableColumnStatistics(catName, dbName, tableName, colNames);
+ ColumnStatistics csOld =
+ getMS().getTableColumnStatistics(
+ catName, dbName, tableName, colNames,
+ request.getTxnId(), request.getValidWriteIdList());
Table t = getTable(catName, dbName, tableName);
// we first use t.getParameters() to prune the stats
MetaStoreUtils.getMergableCols(firstColStats, t.getParameters());
@@ -7390,8 +7427,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
// a single call to get all column stats for all partitions
List<String> partitionNames = new ArrayList<>();
partitionNames.addAll(newStatsMap.keySet());
- List<ColumnStatistics> csOlds = getMS().getPartitionColumnStatistics(catName, dbName,
- tableName, partitionNames, colNames);
+ List<ColumnStatistics> csOlds =
+ getMS().getPartitionColumnStatistics(
+ catName, dbName, tableName, partitionNames, colNames,
+ request.getTxnId(), request.getValidWriteIdList());
if (newStatsMap.values().size() != csOlds.size()) {
// some of the partitions miss stats.
LOG.debug("Some of the partitions miss stats.");
@@ -7405,7 +7444,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
mapToPart.put(partitionNames.get(index), partitions.get(index));
}
}
- Table t = getTable(catName, dbName, tableName);
+ Table t = getTable(catName, dbName, tableName,
+ request.getTxnId(), request.getValidWriteIdList());
for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
ColumnStatistics csNew = entry.getValue();
ColumnStatistics csOld = oldStatsMap.get(entry.getKey());
@@ -7432,7 +7472,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
private Table getTable(String catName, String dbName, String tableName)
throws MetaException, InvalidObjectException {
- Table t = getMS().getTable(catName, dbName, tableName);
+ return getTable(catName, dbName, tableName, -1, null);
+ }
+
+ private Table getTable(String catName, String dbName, String tableName,
+ long txnId, String writeIdList)
+ throws MetaException, InvalidObjectException {
+ Table t = getMS().getTable(catName, dbName, tableName, txnId, writeIdList);
if (t == null) {
throw new InvalidObjectException(TableName.getQualified(catName, dbName, tableName)
+ " table not found");
[55/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index a29ebb7..9033e9a 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -203,6 +203,17 @@ final class SchemaVersionState {
);
}
+final class IsolationLevelCompliance {
+ const YES = 1;
+ const NO = 2;
+ const UNKNOWN = 3;
+ static public $__names = array(
+ 1 => 'YES',
+ 2 => 'NO',
+ 3 => 'UNKNOWN',
+ );
+}
+
final class FunctionType {
const JAVA = 1;
static public $__names = array(
@@ -6517,6 +6528,18 @@ class Table {
* @var int
*/
public $ownerType = 1;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -6609,6 +6632,18 @@ class Table {
'var' => 'ownerType',
'type' => TType::I32,
),
+ 19 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 20 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 21 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -6666,6 +6701,15 @@ class Table {
if (isset($vals['ownerType'])) {
$this->ownerType = $vals['ownerType'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -6841,6 +6885,27 @@ class Table {
$xfer += $input->skip($ftype);
}
break;
+ case 19:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 20:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 21:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -6978,6 +7043,21 @@ class Table {
$xfer += $output->writeI32($this->ownerType);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 19);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 20);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 21);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -7024,6 +7104,18 @@ class Partition {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -7078,6 +7170,18 @@ class Partition {
'var' => 'catName',
'type' => TType::STRING,
),
+ 10 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 11 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 12 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -7108,6 +7212,15 @@ class Partition {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -7218,6 +7331,27 @@ class Partition {
$xfer += $input->skip($ftype);
}
break;
+ case 10:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 11:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 12:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -7307,6 +7441,21 @@ class Partition {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 10);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 11);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 12);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -7830,6 +7979,18 @@ class PartitionSpec {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -7860,6 +8021,18 @@ class PartitionSpec {
'var' => 'catName',
'type' => TType::STRING,
),
+ 7 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 8 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 9 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -7881,6 +8054,15 @@ class PartitionSpec {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -7947,6 +8129,27 @@ class PartitionSpec {
$xfer += $input->skip($ftype);
}
break;
+ case 7:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 8:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 9:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -7996,6 +8199,21 @@ class PartitionSpec {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 7);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 9);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -9894,6 +10112,18 @@ class ColumnStatistics {
* @var \metastore\ColumnStatisticsObj[]
*/
public $statsObj = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -9912,6 +10142,18 @@ class ColumnStatistics {
'class' => '\metastore\ColumnStatisticsObj',
),
),
+ 3 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 4 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ 5 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -9921,6 +10163,15 @@ class ColumnStatistics {
if (isset($vals['statsObj'])) {
$this->statsObj = $vals['statsObj'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -9969,6 +10220,27 @@ class ColumnStatistics {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 5:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -10007,6 +10279,21 @@ class ColumnStatistics {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 3);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 5);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -10025,6 +10312,10 @@ class AggrStats {
* @var int
*/
public $partsFound = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -10042,6 +10333,10 @@ class AggrStats {
'var' => 'partsFound',
'type' => TType::I64,
),
+ 3 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
@@ -10051,6 +10346,9 @@ class AggrStats {
if (isset($vals['partsFound'])) {
$this->partsFound = $vals['partsFound'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -10098,6 +10396,13 @@ class AggrStats {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -10133,6 +10438,11 @@ class AggrStats {
$xfer += $output->writeI64($this->partsFound);
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 3);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -10151,6 +10461,14 @@ class SetPartitionsStatsRequest {
* @var bool
*/
public $needMerge = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -10168,6 +10486,14 @@ class SetPartitionsStatsRequest {
'var' => 'needMerge',
'type' => TType::BOOL,
),
+ 3 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 4 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -10177,6 +10503,12 @@ class SetPartitionsStatsRequest {
if (isset($vals['needMerge'])) {
$this->needMerge = $vals['needMerge'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -10224,6 +10556,20 @@ class SetPartitionsStatsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 3:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -10259,6 +10605,16 @@ class SetPartitionsStatsRequest {
$xfer += $output->writeBool($this->needMerge);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 3);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13010,6 +13366,10 @@ class TableStatsResult {
* @var \metastore\ColumnStatisticsObj[]
*/
public $tableStats = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13023,12 +13383,19 @@ class TableStatsResult {
'class' => '\metastore\ColumnStatisticsObj',
),
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['tableStats'])) {
$this->tableStats = $vals['tableStats'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -13069,6 +13436,13 @@ class TableStatsResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13099,6 +13473,11 @@ class TableStatsResult {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13113,6 +13492,10 @@ class PartitionsStatsResult {
* @var array
*/
public $partStats = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13134,12 +13517,19 @@ class PartitionsStatsResult {
),
),
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['partStats'])) {
$this->partStats = $vals['partStats'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -13193,6 +13583,13 @@ class PartitionsStatsResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13233,6 +13630,11 @@ class PartitionsStatsResult {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13259,6 +13661,14 @@ class TableStatsRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13283,6 +13693,14 @@ class TableStatsRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 5 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 6 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -13298,6 +13716,12 @@ class TableStatsRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -13358,6 +13782,20 @@ class TableStatsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 5:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 6:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13403,6 +13841,16 @@ class TableStatsRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13433,6 +13881,14 @@ class PartitionsStatsRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13465,6 +13921,14 @@ class PartitionsStatsRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 6 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 7 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -13483,6 +13947,12 @@ class PartitionsStatsRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -13560,6 +14030,20 @@ class PartitionsStatsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 6:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 7:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13622,6 +14106,16 @@ class PartitionsStatsRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13636,6 +14130,10 @@ class AddPartitionsResult {
* @var \metastore\Partition[]
*/
public $partitions = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13649,12 +14147,19 @@ class AddPartitionsResult {
'class' => '\metastore\Partition',
),
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['partitions'])) {
$this->partitions = $vals['partitions'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -13695,6 +14200,13 @@ class AddPartitionsResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13725,6 +14237,11 @@ class AddPartitionsResult {
}
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -13759,6 +14276,14 @@ class AddPartitionsRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -13792,6 +14317,14 @@ class AddPartitionsRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 7 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 8 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -13813,6 +14346,12 @@ class AddPartitionsRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -13888,6 +14427,20 @@ class AddPartitionsRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 7:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 8:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -13943,6 +14496,16 @@ class AddPartitionsRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 7);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -23596,6 +24159,14 @@ class GetTableRequest {
* @var string
*/
public $catName = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -23617,6 +24188,14 @@ class GetTableRequest {
'var' => 'catName',
'type' => TType::STRING,
),
+ 5 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 6 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -23632,6 +24211,12 @@ class GetTableRequest {
if (isset($vals['catName'])) {
$this->catName = $vals['catName'];
}
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
}
}
@@ -23683,6 +24268,20 @@ class GetTableRequest {
$xfer += $input->skip($ftype);
}
break;
+ case 5:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 6:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -23719,6 +24318,16 @@ class GetTableRequest {
$xfer += $output->writeString($this->catName);
$xfer += $output->writeFieldEnd();
}
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -23733,6 +24342,10 @@ class GetTableResult {
* @var \metastore\Table
*/
public $table = null;
+ /**
+ * @var int
+ */
+ public $isStatsCompliant = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -23742,12 +24355,19 @@ class GetTableResult {
'type' => TType::STRUCT,
'class' => '\metastore\Table',
),
+ 2 => array(
+ 'var' => 'isStatsCompliant',
+ 'type' => TType::I32,
+ ),
);
}
if (is_array($vals)) {
if (isset($vals['table'])) {
$this->table = $vals['table'];
}
+ if (isset($vals['isStatsCompliant'])) {
+ $this->isStatsCompliant = $vals['isStatsCompliant'];
+ }
}
}
@@ -23778,6 +24398,13 @@ class GetTableResult {
$xfer += $input->skip($ftype);
}
break;
+ case 2:
+ if ($ftype == TType::I32) {
+ $xfer += $input->readI32($this->isStatsCompliant);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -23799,6 +24426,11 @@ class GetTableResult {
$xfer += $this->table->write($output);
$xfer += $output->writeFieldEnd();
}
+ if ($this->isStatsCompliant !== null) {
+ $xfer += $output->writeFieldBegin('isStatsCompliant', TType::I32, 2);
+ $xfer += $output->writeI32($this->isStatsCompliant);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
@@ -30306,6 +30938,279 @@ class GetRuntimeStatsRequest {
}
+class AlterPartitionsRequest {
+ static $_TSPEC;
+
+ /**
+ * @var string
+ */
+ public $dbName = null;
+ /**
+ * @var string
+ */
+ public $tableName = null;
+ /**
+ * @var \metastore\Partition[]
+ */
+ public $partitions = null;
+ /**
+ * @var \metastore\EnvironmentContext
+ */
+ public $environmentContext = null;
+ /**
+ * @var int
+ */
+ public $txnId = -1;
+ /**
+ * @var string
+ */
+ public $validWriteIdList = null;
+
+ public function __construct($vals=null) {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ 1 => array(
+ 'var' => 'dbName',
+ 'type' => TType::STRING,
+ ),
+ 2 => array(
+ 'var' => 'tableName',
+ 'type' => TType::STRING,
+ ),
+ 3 => array(
+ 'var' => 'partitions',
+ 'type' => TType::LST,
+ 'etype' => TType::STRUCT,
+ 'elem' => array(
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\Partition',
+ ),
+ ),
+ 4 => array(
+ 'var' => 'environmentContext',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\EnvironmentContext',
+ ),
+ 5 => array(
+ 'var' => 'txnId',
+ 'type' => TType::I64,
+ ),
+ 6 => array(
+ 'var' => 'validWriteIdList',
+ 'type' => TType::STRING,
+ ),
+ );
+ }
+ if (is_array($vals)) {
+ if (isset($vals['dbName'])) {
+ $this->dbName = $vals['dbName'];
+ }
+ if (isset($vals['tableName'])) {
+ $this->tableName = $vals['tableName'];
+ }
+ if (isset($vals['partitions'])) {
+ $this->partitions = $vals['partitions'];
+ }
+ if (isset($vals['environmentContext'])) {
+ $this->environmentContext = $vals['environmentContext'];
+ }
+ if (isset($vals['txnId'])) {
+ $this->txnId = $vals['txnId'];
+ }
+ if (isset($vals['validWriteIdList'])) {
+ $this->validWriteIdList = $vals['validWriteIdList'];
+ }
+ }
+ }
+
+ public function getName() {
+ return 'AlterPartitionsRequest';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ case 1:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->dbName);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 2:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->tableName);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 3:
+ if ($ftype == TType::LST) {
+ $this->partitions = array();
+ $_size820 = 0;
+ $_etype823 = 0;
+ $xfer += $input->readListBegin($_etype823, $_size820);
+ for ($_i824 = 0; $_i824 < $_size820; ++$_i824)
+ {
+ $elem825 = null;
+ $elem825 = new \metastore\Partition();
+ $xfer += $elem825->read($input);
+ $this->partitions []= $elem825;
+ }
+ $xfer += $input->readListEnd();
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 4:
+ if ($ftype == TType::STRUCT) {
+ $this->environmentContext = new \metastore\EnvironmentContext();
+ $xfer += $this->environmentContext->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 5:
+ if ($ftype == TType::I64) {
+ $xfer += $input->readI64($this->txnId);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ case 6:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->validWriteIdList);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('AlterPartitionsRequest');
+ if ($this->dbName !== null) {
+ $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
+ $xfer += $output->writeString($this->dbName);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->tableName !== null) {
+ $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2);
+ $xfer += $output->writeString($this->tableName);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->partitions !== null) {
+ if (!is_array($this->partitions)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('partitions', TType::LST, 3);
+ {
+ $output->writeListBegin(TType::STRUCT, count($this->partitions));
+ {
+ foreach ($this->partitions as $iter826)
+ {
+ $xfer += $iter826->write($output);
+ }
+ }
+ $output->writeListEnd();
+ }
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->environmentContext !== null) {
+ if (!is_object($this->environmentContext)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('environmentContext', TType::STRUCT, 4);
+ $xfer += $this->environmentContext->write($output);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->txnId !== null) {
+ $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
+ $xfer += $output->writeI64($this->txnId);
+ $xfer += $output->writeFieldEnd();
+ }
+ if ($this->validWriteIdList !== null) {
+ $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
+ $xfer += $output->writeString($this->validWriteIdList);
+ $xfer += $output->writeFieldEnd();
+ }
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
+class AlterPartitionsResponse {
+ static $_TSPEC;
+
+
+ public function __construct() {
+ if (!isset(self::$_TSPEC)) {
+ self::$_TSPEC = array(
+ );
+ }
+ }
+
+ public function getName() {
+ return 'AlterPartitionsResponse';
+ }
+
+ public function read($input)
+ {
+ $xfer = 0;
+ $fname = null;
+ $ftype = 0;
+ $fid = 0;
+ $xfer += $input->readStructBegin($fname);
+ while (true)
+ {
+ $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+ if ($ftype == TType::STOP) {
+ break;
+ }
+ switch ($fid)
+ {
+ default:
+ $xfer += $input->skip($ftype);
+ break;
+ }
+ $xfer += $input->readFieldEnd();
+ }
+ $xfer += $input->readStructEnd();
+ return $xfer;
+ }
+
+ public function write($output) {
+ $xfer = 0;
+ $xfer += $output->writeStructBegin('AlterPartitionsResponse');
+ $xfer += $output->writeFieldStop();
+ $xfer += $output->writeStructEnd();
+ return $xfer;
+ }
+
+}
+
class MetaException extends TException {
static $_TSPEC;
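The same txnId/validWriteIdList pair, plus the isStatsCompliant flag typed as IsolationLevelCompliance, is threaded through the stats structs above (ColumnStatistics, AggrStats, TableStatsRequest/Result, PartitionsStatsRequest/Result, GetTableRequest/Result). A rough Java-side sketch of a snapshot-aware table stats fetch, assuming the regenerated Java bindings mirror the PHP field layout and the existing get_table_statistics_req service method; setter names follow the usual Thrift bean pattern and the column names are placeholders, so this is illustration only.

  import java.util.Arrays;

  import org.apache.hadoop.hive.metastore.api.TableStatsRequest;
  import org.apache.hadoop.hive.metastore.api.TableStatsResult;
  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

  // Sketch only: the caller passes its transaction id and write-id snapshot so
  // the metastore can report whether the returned stats satisfy that snapshot.
  public final class TxnStatsExample {
    static TableStatsResult fetchStats(ThriftHiveMetastore.Iface client,
        String dbName, String tblName, long txnId, String validWriteIdList)
        throws Exception {
      TableStatsRequest req = new TableStatsRequest();
      req.setDbName(dbName);
      req.setTblName(tblName);
      req.setColNames(Arrays.asList("col1", "col2")); // placeholder column names
      req.setTxnId(txnId);
      req.setValidWriteIdList(validWriteIdList);
      TableStatsResult result = client.get_table_statistics_req(req);
      // The result carries isStatsCompliant (field 2 above), an
      // IsolationLevelCompliance value of YES, NO or UNKNOWN.
      return result;
    }
  }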
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 5402372..dbc54f8 100755
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -107,7 +107,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print(' get_partitions_by_names(string db_name, string tbl_name, names)')
print(' void alter_partition(string db_name, string tbl_name, Partition new_part)')
print(' void alter_partitions(string db_name, string tbl_name, new_parts)')
- print(' void alter_partitions_with_environment_context(string db_name, string tbl_name, new_parts, EnvironmentContext environment_context)')
+ print(' AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req)')
print(' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)')
print(' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)')
print(' bool partition_name_has_valid_characters( part_vals, bool throw_exception)')
@@ -799,10 +799,10 @@ elif cmd == 'alter_partitions':
pp.pprint(client.alter_partitions(args[0],args[1],eval(args[2]),))
elif cmd == 'alter_partitions_with_environment_context':
- if len(args) != 4:
- print('alter_partitions_with_environment_context requires 4 args')
+ if len(args) != 1:
+ print('alter_partitions_with_environment_context requires 1 args')
sys.exit(1)
- pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
+ pp.pprint(client.alter_partitions_with_environment_context(eval(args[0]),))
elif cmd == 'alter_partition_with_environment_context':
if len(args) != 4:
[24/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
index 8e02351..8f0cc4d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
@@ -50,22 +50,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToBoolean(t) (type: boolean)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToBoolean(col 0:decimal(18,9)) -> 2:boolean
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToBoolean(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:boolean
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: boolean)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -75,8 +75,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -84,9 +84,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -166,22 +166,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToByte(t) (type: tinyint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:tinyint
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:tinyint
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: tinyint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -191,8 +191,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -200,9 +200,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -282,22 +282,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToShort(t) (type: smallint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:smallint
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:smallint
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: smallint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -307,8 +307,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -316,9 +316,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -398,22 +398,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToInteger(t) (type: int)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:int
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:int
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -423,8 +423,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -432,9 +432,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -514,22 +514,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToLong(t) (type: bigint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:bigint
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:bigint
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: bigint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -539,8 +539,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -548,9 +548,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -630,22 +630,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToFloat(t) (type: float)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToFloat(col 0:decimal(18,9)) -> 2:float
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToFloat(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:float
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: float)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -655,8 +655,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -664,9 +664,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(18,9), double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -746,22 +746,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToDouble(t) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToDouble(col 0:decimal(18,9)) -> 2:double
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToDouble(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:double
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: double)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -771,8 +771,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -780,9 +780,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(18,9), double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -862,22 +862,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToString(t) (type: string)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToString(col 0:decimal(18,9)) -> 2:string
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToString(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:string
Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -887,8 +887,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -896,9 +896,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [string]
+ scratchColumnTypeNames: [decimal(18,9), string]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -989,22 +989,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToBoolean(t) (type: boolean)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToBoolean(col 0:decimal(18,9)) -> 2:boolean
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToBoolean(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:boolean
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: boolean)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1014,8 +1014,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1023,9 +1023,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1105,22 +1105,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToByte(t) (type: tinyint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:tinyint
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:tinyint
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: tinyint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1130,8 +1130,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1139,9 +1139,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1221,22 +1221,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToShort(t) (type: smallint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:smallint
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:smallint
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: smallint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1246,8 +1246,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1255,9 +1255,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1337,22 +1337,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToInteger(t) (type: int)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:int
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:int
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1362,8 +1362,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1371,9 +1371,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1453,22 +1453,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToLong(t) (type: bigint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:bigint
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:bigint
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: bigint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1478,8 +1478,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1487,9 +1487,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(18,9), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1569,22 +1569,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToFloat(t) (type: float)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToFloat(col 0:decimal(18,9)) -> 2:float
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToFloat(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:float
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: float)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1594,8 +1594,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1603,9 +1603,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(18,9), double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1685,22 +1685,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToDouble(t) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToDouble(col 0:decimal(18,9)) -> 2:double
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToDouble(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:double
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: double)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1710,8 +1710,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1719,9 +1719,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(18,9), double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1801,22 +1801,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToString(t) (type: string)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2]
- selectExpressions: CastDecimalToString(col 0:decimal(18,9)) -> 2:string
+ projectedOutputColumnNums: [3]
+ selectExpressions: CastDecimalToString(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:string
Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1826,8 +1826,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1835,9 +1835,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [string]
+ scratchColumnTypeNames: [decimal(18,9), string]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1914,7 +1914,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 3.14 (type: decimal(4,2))
outputColumnNames: _col0
@@ -1939,8 +1939,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1948,7 +1948,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(4,2)]
@@ -1993,7 +1993,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 3.14 (type: decimal(4,2))
outputColumnNames: _col0
@@ -2018,8 +2018,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2027,7 +2027,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(4,2)]
@@ -2072,7 +2072,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 1355944339.1234567 (type: decimal(30,8))
outputColumnNames: _col0
@@ -2097,8 +2097,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2106,7 +2106,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(30,8)]
@@ -2151,7 +2151,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 1 (type: decimal(10,0))
outputColumnNames: _col0
@@ -2176,8 +2176,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2185,7 +2185,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(10,0)]
@@ -2221,7 +2221,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 1 (type: decimal(10,0))
outputColumnNames: _col0
@@ -2246,8 +2246,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2255,7 +2255,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(10,0)]
@@ -2300,7 +2300,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 3 (type: decimal(10,0))
outputColumnNames: _col0
@@ -2325,8 +2325,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2334,7 +2334,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(10,0)]
@@ -2379,7 +2379,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 3 (type: decimal(10,0))
outputColumnNames: _col0
@@ -2404,8 +2404,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2413,7 +2413,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(10,0)]
@@ -2458,7 +2458,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 3 (type: decimal(10,0))
outputColumnNames: _col0
@@ -2483,8 +2483,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2492,7 +2492,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(10,0)]
@@ -2537,7 +2537,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 3 (type: decimal(10,0))
outputColumnNames: _col0
@@ -2562,8 +2562,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2571,7 +2571,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(10,0)]
@@ -2616,7 +2616,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 1 (type: decimal(20,19))
outputColumnNames: _col0
@@ -2641,8 +2641,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2650,7 +2650,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(20,19)]
@@ -2695,7 +2695,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: 0.99999999999999999999 (type: decimal(20,20))
outputColumnNames: _col0
@@ -2720,8 +2720,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2729,7 +2729,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: []
- dataColumns: t:decimal(18,9)
+ dataColumns: t:decimal(18,9)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(20,20)]
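Note on the /DECIMAL_64 annotation recurring in the plans above: decimal columns of precision 18 or less can be carried as a scaled 64-bit long rather than a full HiveDecimal, and expressions such as ConvertDecimal64ToDecimal widen that long back into a regular decimal vector, which is why an extra decimal scratch column now appears in scratchColumnTypeNames. A minimal, hypothetical Java sketch of that conversion (illustrative only, not Hive's actual vectorized classes; the 550000/scale-5 pair mirrors the decimal64Val/decimalVal values printed in these plans):

    import java.math.BigDecimal;

    public class Decimal64Sketch {
        // A DECIMAL_64 value is just the unscaled long; the decimal it stands for
        // is unscaledLong * 10^-scale.
        static BigDecimal toDecimal(long decimal64Val, int scale) {
            return BigDecimal.valueOf(decimal64Val, scale);
        }

        public static void main(String[] args) {
            // e.g. at scale 5 the scalar 5.5 travels as decimal64Val 550000
            System.out.println(toDecimal(550000L, 5)); // prints 5.50000
        }
    }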
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
index 0bfd12e..5bea214 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
@@ -129,6 +129,44 @@ NULL
124.00000
125.20000
200.00000
+PREHOOK: query: explain SELECT cast(key as decimal) FROM DECIMAL_5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain SELECT cast(key as decimal) FROM DECIMAL_5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: decimal_5
+ Statistics: Num rows: 38 Data size: 4032 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: CAST( key AS decimal(10,0)) (type: decimal(10,0))
+ outputColumnNames: _col0
+ Statistics: Num rows: 38 Data size: 4032 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 38 Data size: 4032 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized, llap
+ LLAP IO: all inputs
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
PREHOOK: type: QUERY
PREHOOK: Input: default@decimal_5
@@ -137,42 +175,42 @@ POSTHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
POSTHOOK: type: QUERY
POSTHOOK: Input: default@decimal_5
#### A masked pattern was here ####
--4400
+-440000000
NULL
0
0
-100
-10
-1
-0
-0
-200
-20
-2
-0
-0
+10000000
+1000000
+100000
+10000
+1000
+20000000
+2000000
+200000
0
-0
-0
-0
-0
-0
-0
-1
-2
-3
--1
--1
--1
-1
-1
-124
-125
--1255
-3
-3
-3
-1
+20000
+2000
+30000
+33000
+33300
+-30000
+-33000
+-33300
+100000
+200000
+314000
+-112000
+-112000
+-112200
+112000
+112200
+12400000
+12520000
+-125549000
+314000
+314000
+314000
+100000
NULL
NULL
PREHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5
@@ -187,38 +225,38 @@ NULL
NULL
0.000
0.000
+NULL
+NULL
100.000
10.000
1.000
-0.100
-0.010
+NULL
+NULL
200.000
-20.000
-2.000
0.000
-0.200
-0.020
-0.300
-0.330
-0.333
--0.300
--0.330
--0.333
-1.000
+20.000
2.000
-3.140
--1.120
--1.120
--1.122
-1.120
-1.122
-124.000
-125.200
-NULL
-3.140
-3.140
-3.140
-1.000
+30.000
+33.000
+33.300
+-30.000
+-33.000
+-33.300
+100.000
+200.000
+314.000
+-112.000
+-112.000
+-112.200
+112.000
+112.200
+NULL
+NULL
+NULL
+314.000
+314.000
+314.000
+100.000
NULL
NULL
PREHOOK: query: DROP TABLE DECIMAL_5_txt
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
index 800a4ae..705bf8b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
@@ -135,7 +135,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 2684 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: decimal(10,5)), value (type: int)
outputColumnNames: _col0, _col1
@@ -159,8 +159,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -168,7 +168,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(10,5), value:int
+ dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -276,7 +276,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(17,4), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(17,4)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: decimal(17,4)), value (type: int)
outputColumnNames: _col0, _col1
@@ -300,8 +300,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -309,7 +309,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(17,4), value:int
+ dataColumns: key:decimal(17,4)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -427,7 +427,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 2576 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CAST( key AS decimal(18,5)) (type: decimal(18,5))
outputColumnNames: _col0
@@ -435,7 +435,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [3]
- selectExpressions: CastDecimalToDecimal(col 0:decimal(10,5)) -> 3:decimal(18,5)
+ selectExpressions: ConvertDecimal64ToDecimal(col 0:decimal(18,5)/DECIMAL_64) -> 3:decimal(18,5)
Statistics: Num rows: 27 Data size: 2576 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: decimal(18,5))
@@ -452,8 +452,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -461,7 +461,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(10,5), value:int
+ dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(18,5)]
Map 4
@@ -471,7 +471,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 3024 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(17,4), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(17,4)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CAST( key AS decimal(18,5)) (type: decimal(18,5))
outputColumnNames: _col0
@@ -479,7 +479,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [3]
- selectExpressions: CastDecimalToDecimal(col 0:decimal(17,4)) -> 3:decimal(18,5)
+ selectExpressions: ConvertDecimal64ToDecimal(col 0:decimal(18,5)/DECIMAL_64) -> 3:decimal(18,5)
Statistics: Num rows: 27 Data size: 3024 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: decimal(18,5))
@@ -496,8 +496,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -505,7 +505,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: key:decimal(17,4), value:int
+ dataColumns: key:decimal(17,4)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: [decimal(18,5)]
Reducer 3
@@ -575,54 +575,54 @@ NULL
NULL
NULL
NULL
--1234567890.12350
+-123456789.01235
-4400.00000
--4400.00000
--1255.49000
-1255.49000
--1.12200
+-440.00000
+-125.54900
-1.12200
-1.12000
--1.12000
--0.33300
-0.33300
-0.30000
--0.30000
+-0.11220
+-0.11200
+-0.03330
+-0.03000
0.00000
0.00000
0.00000
0.00000
-0.33300
+0.03330
+0.10000
+0.10000
+0.11200
+0.11220
+0.20000
+0.31400
+0.31400
+0.31400
0.33300
1.00000
1.00000
1.00000
-1.00000
-1.12000
+1.07343
1.12000
1.12200
-1.12200
-2.00000
2.00000
3.14000
3.14000
3.14000
-3.14000
-3.14000
-3.14000
-10.00000
10.00000
-10.73430
10.73433
+12.40000
+12.52000
124.00000
-124.00000
-125.20000
125.20000
+2323.22344
23232.23435
-23232.23440
-2389432.23750
-2389432.23750
-1234567890.12350
+238943.22375
+238943.22375
+123456789.01235
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
PREHOOK: type: CREATETABLE_AS_SELECT
@@ -655,7 +655,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 2684 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key + 5.5) (type: decimal(11,5)), (value * 11) (type: int)
outputColumnNames: _col0, _col1
@@ -663,7 +663,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [3, 4]
- selectExpressions: DecimalColAddDecimalScalar(col 0:decimal(10,5), val 5.5) -> 3:decimal(11,5), LongColMultiplyLongScalar(col 1:int, val 11) -> 4:int
+ selectExpressions: Decimal64ColAddDecimal64Scalar(col 0:decimal(10,5)/DECIMAL_64, decimal64Val 550000, decimalVal 5.5) -> 3:decimal(11,5)/DECIMAL_64, LongColMultiplyLongScalar(col 1:int, val 11) -> 4:int
Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col1 (type: int)
@@ -681,8 +681,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -690,9 +690,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(10,5), value:int
+ dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,5), bigint]
+ scratchColumnTypeNames: [decimal(11,5)/DECIMAL_64, bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
index e3d52d2..93b8655 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: i (type: int), s (type: string), b (type: bigint)
sort order: +++
@@ -93,8 +93,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -102,7 +101,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 3, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -297,7 +296,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: d (type: double), s (type: string), f (type: float)
sort order: +++
@@ -316,8 +315,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -325,7 +323,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -520,7 +518,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), f (type: float)
sort order: ++
@@ -540,8 +538,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -549,7 +546,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 7, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -744,7 +741,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), s (type: string), f (type: float)
sort order: +++
@@ -763,8 +760,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -772,7 +768,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 7, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -967,7 +963,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: t (type: tinyint), s (type: string), d (type: double)
sort order: ++-
@@ -986,8 +982,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -995,7 +990,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1190,7 +1185,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), s (type: string)
sort order: ++
@@ -1210,8 +1205,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1219,7 +1213,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 7, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1449,7 +1443,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), f (type: float)
sort order: ++
@@ -1468,8 +1462,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1477,7 +1470,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1707,7 +1700,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), f (type: float)
sort order: ++
@@ -1726,8 +1719,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1735,7 +1727,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [4, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1930,7 +1922,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), i (type: int)
sort order: ++
@@ -1950,8 +1942,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1959,7 +1950,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -2097,7 +2088,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), i (type: int)
sort order: ++
@@ -2117,8 +2108,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2126,7 +2116,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -2264,7 +2254,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(10,0)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), i (type: int)
sort order: ++
@@ -2284,8 +2274,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2293,7 +2282,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(10,0)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
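
The dec:decimal(10,0)/DECIMAL_64 annotations above mean the column is now handed to the vectorizer as a 64-bit scaled long (possible whenever the decimal's precision is at most 18, since an 18-digit unscaled value fits in a signed long) instead of one full decimal object per value. A rough plain-Java illustration of that representation only, not Hive's own column-vector classes:

    import java.math.BigDecimal;

    // Illustrative only: a decimal with precision <= 18 carried as its unscaled
    // value in a 64-bit long plus a scale -- the idea behind the DECIMAL_64
    // annotation in the plans above (not Hive's API).
    public class Decimal64Sketch {
        static long encode(BigDecimal value, int scale) {
            // unscaledValue fits in a long because precision <= 18
            return value.setScale(scale).unscaledValue().longValueExact();
        }

        static BigDecimal decode(long scaledValue, int scale) {
            return BigDecimal.valueOf(scaledValue, scale);
        }

        public static void main(String[] args) {
            long dec = encode(new BigDecimal("12345.67"), 2); // -> 1234567
            System.out.println(dec + " decodes back to " + decode(dec, 2));
        }
    }
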
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec4.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec4.q.out
index 3cebb04..78df440 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec4.q.out
@@ -92,8 +92,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
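
Taken together, the three plan fields read as: the input format advertises a feature set (inputFormatFeatureSupport), the vectorizer may drop entries from it and record why (vectorizationSupportRemovedReasons), and whatever survives is reported as featureSupportInUse; with this change DECIMAL_64 is no longer dropped on the LLAP text path. A minimal sketch of that bookkeeping, with illustrative classes rather than Hive's own:

    import java.util.ArrayList;
    import java.util.EnumSet;
    import java.util.List;

    // Illustrative bookkeeping behind the plan fields above (not Hive's classes).
    public class FeatureSupportSketch {
        enum Feature { DECIMAL_64 }

        public static void main(String[] args) {
            EnumSet<Feature> inputFormatFeatureSupport = EnumSet.of(Feature.DECIMAL_64);
            List<String> vectorizationSupportRemovedReasons = new ArrayList<>();

            EnumSet<Feature> featureSupportInUse = EnumSet.copyOf(inputFormatFeatureSupport);
            // Before this change the LLAP text path effectively did:
            //   featureSupportInUse.remove(Feature.DECIMAL_64);
            //   vectorizationSupportRemovedReasons.add("DECIMAL_64 disabled because LLAP is enabled");
            // which is exactly the line removed in the hunks above.

            System.out.println("inputFormatFeatureSupport: " + inputFormatFeatureSupport);
            System.out.println("featureSupportInUse:       " + featureSupportInUse);
            System.out.println("removedReasons:            " + vectorizationSupportRemovedReasons);
        }
    }
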
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
index 1382503..2a8ae49 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_0.q.out
@@ -74,8 +74,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -255,8 +255,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -425,8 +425,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -586,8 +586,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -767,8 +767,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -937,8 +937,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1098,8 +1098,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1279,8 +1279,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1449,8 +1449,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1656,8 +1656,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_1.q.out b/ql/src/test/results/clientpositive/llap/vectorization_1.q.out
index c87926c..bdc4de4 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_1.q.out
@@ -107,8 +107,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_10.q.out b/ql/src/test/results/clientpositive/llap/vectorization_10.q.out
index f4069a0..5cba462 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_10.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_10.q.out
@@ -98,8 +98,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_11.q.out b/ql/src/test/results/clientpositive/llap/vectorization_11.q.out
index 17933b7..45a3e58 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_11.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_11.q.out
@@ -80,8 +80,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_12.q.out b/ql/src/test/results/clientpositive/llap/vectorization_12.q.out
index 0ead6c4..7b508a0 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_12.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_12.q.out
@@ -134,8 +134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_13.q.out b/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
index d72c298..222d232 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_13.q.out
@@ -136,8 +136,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -488,8 +488,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_14.q.out b/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
index 7ae99a3..6f99dde 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_14.q.out
@@ -136,8 +136,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_15.q.out b/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
index 31363df..1f13589 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_15.q.out
@@ -132,8 +132,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_16.q.out b/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
index 59f2d10..33e72fc 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_16.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_17.q.out b/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
index d0b2f7a..4d5f2e5 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_17.q.out
@@ -102,8 +102,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_2.q.out b/ql/src/test/results/clientpositive/llap/vectorization_2.q.out
index 83833da..29850bb 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_2.q.out
@@ -111,8 +111,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_3.q.out b/ql/src/test/results/clientpositive/llap/vectorization_3.q.out
index 3c502cd..97ad680 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_3.q.out
@@ -116,8 +116,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_4.q.out b/ql/src/test/results/clientpositive/llap/vectorization_4.q.out
index a8cfa48..c459a8c 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_4.q.out
@@ -111,8 +111,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_5.q.out b/ql/src/test/results/clientpositive/llap/vectorization_5.q.out
index 5124740..14a4691 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_5.q.out
@@ -104,8 +104,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_6.q.out b/ql/src/test/results/clientpositive/llap/vectorization_6.q.out
index 4e43bd0..d863334 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_6.q.out
@@ -92,8 +92,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
index 907411b..b0e682a 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_7.q.out
@@ -108,8 +108,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -355,8 +355,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
index 64480d7..3a09542 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_8.q.out
@@ -104,8 +104,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -338,8 +338,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_9.q.out b/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
index 59f2d10..33e72fc 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_9.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
index c83d5e6..f19d8a6 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_decimal_date.q.out
@@ -72,8 +72,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out b/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
index 61f1e26..8f4acba 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_div0.q.out
@@ -54,8 +54,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -273,8 +273,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -492,8 +492,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -711,8 +711,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
index 6ea5fb2..ded40fd 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_limit.q.out
@@ -43,8 +43,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -156,8 +156,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -314,8 +314,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -519,8 +519,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -678,8 +678,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -915,8 +915,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_nested_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_nested_udf.q.out b/ql/src/test/results/clientpositive/llap/vectorization_nested_udf.q.out
index e6427fa..1a87d1d 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_nested_udf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_nested_udf.q.out
@@ -64,8 +64,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
index 80c7c0c..89c140e 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_part_project.q.out
@@ -85,8 +85,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
index 43f999e..fb5ff60 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_pushdown.q.out
@@ -46,8 +46,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
index 0830d1c..d12e038 100644
--- a/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorization_short_regress.q.out
@@ -136,8 +136,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -398,8 +398,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -652,8 +652,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -885,8 +885,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1115,8 +1115,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1412,8 +1412,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1659,8 +1659,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1963,8 +1963,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2223,8 +2223,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2500,8 +2500,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2821,8 +2821,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3223,8 +3223,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3458,8 +3458,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3573,8 +3573,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3760,8 +3760,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3875,8 +3875,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3990,8 +3990,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4105,8 +4105,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4220,8 +4220,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4335,8 +4335,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
index 18c23d6..fb81501 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_case.q.out
@@ -86,8 +86,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -235,8 +235,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -326,8 +326,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -470,8 +470,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -586,7 +586,7 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE ((attr + 2)) END (type: decimal(11,0))
outputColumnNames: _col0
@@ -594,7 +594,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [6]
- selectExpressions: IfExprCondExprCondExpr(col 3:boolean, col 4:decimal(11,0)col 5:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, DecimalColAddDecimalScalar(col 1:decimal(10,0), val 1) -> 4:decimal(11,0), DecimalColAddDecimalScalar(col 1:decimal(10,0), val 2) -> 5:decimal(11,0)) -> 6:decimal(11,0)
+ selectExpressions: IfExprCondExprCondExpr(col 3:boolean, col 7:decimal(11,0)col 8:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, ConvertDecimal64ToDecimal(col 4:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 1, decimalVal 1) -> 4:decimal(11,0)/DECIMAL_64) -> 7:decimal(11,0), ConvertDecimal64ToDecimal(col 5:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 2, decimalVal 2) -> 5:decimal(11,0)/DECIMAL_64) -> 8:decimal(11,0)) -> 6:decimal(11,0)
Statistics: Num rows: 3 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
@@ -611,8 +611,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -620,9 +620,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(11,0), decimal(11,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(11,0)/DECIMAL_64, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -667,15 +667,15 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN (1) ELSE ((attr + 2)) END (type: decimal(11,0))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [6]
- selectExpressions: IfExprColumnCondExpr(col 3:boolean, col 4:decimal(1,0)col 5:decimal(11,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, ConstantVectorExpression(val 1) -> 4:decimal(1,0), DecimalColAddDecimalScalar(col 1:decimal(10,0), val 2) -> 5:decimal(11,0)) -> 6:decimal(11,0)
+ projectedOutputColumnNums: [8]
+ selectExpressions: VectorUDFAdaptor(CASE WHEN ((member = 1)) THEN (1) ELSE ((attr + 2)) END)(children: VectorUDFAdaptor((member = 1)) -> 6:boolean, ConvertDecimal64ToDecimal(col 7:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 2, decimalVal 2) -> 7:decimal(11,0)/DECIMAL_64) -> 9:decimal(11,0)) -> 8:decimal(11,0)
Statistics: Num rows: 3 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
@@ -692,8 +692,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -701,9 +701,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(1,0), decimal(11,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(1,0), decimal(11,0)/DECIMAL_64, bigint, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -748,15 +748,15 @@ STAGE PLANS:
Statistics: Num rows: 3 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:member:decimal(10,0), 1:attr:decimal(10,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:member:decimal(10,0)/DECIMAL_64, 1:attr:decimal(10,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (2) END (type: decimal(11,0))
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [6]
- selectExpressions: IfExprCondExprColumn(col 3:boolean, col 4:decimal(11,0), col 5:decimal(1,0))(children: VectorUDFAdaptor((member = 1)) -> 3:boolean, DecimalColAddDecimalScalar(col 1:decimal(10,0), val 1) -> 4:decimal(11,0), ConstantVectorExpression(val 2) -> 5:decimal(1,0)) -> 6:decimal(11,0)
+ projectedOutputColumnNums: [8]
+ selectExpressions: VectorUDFAdaptor(CASE WHEN ((member = 1)) THEN ((attr + 1)) ELSE (2) END)(children: VectorUDFAdaptor((member = 1)) -> 6:boolean, ConvertDecimal64ToDecimal(col 7:decimal(11,0)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,0)/DECIMAL_64, decimal64Val 1, decimalVal 1) -> 7:decimal(11,0)/DECIMAL_64) -> 9:decimal(11,0)) -> 8:decimal(11,0)
Statistics: Num rows: 3 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
@@ -773,8 +773,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -782,9 +782,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: member:decimal(10,0), attr:decimal(10,0)
+ dataColumns: member:decimal(10,0)/DECIMAL_64, attr:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, decimal(11,0), decimal(1,0), decimal(11,0)]
+ scratchColumnTypeNames: [bigint, decimal(11,0)/DECIMAL_64, decimal(1,0), bigint, decimal(11,0)/DECIMAL_64, decimal(11,0), decimal(11,0)]
Stage: Stage-0
Fetch Operator
@@ -872,8 +872,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -953,8 +953,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1034,8 +1034,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
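
The new Decimal64ColAddDecimal64Scalar and ConvertDecimal64ToDecimal expressions above do the arithmetic directly on the scaled longs and only widen back to a regular decimal vector where a consumer still needs it. A conceptual plain-Java sketch of that pattern (the batch layout and the scale-2 sample values are assumptions for illustration, not Hive internals):

    import java.math.BigDecimal;

    // Conceptual sketch of a Decimal64-style column-plus-scalar add
    // (not Hive's implementation).
    public class Decimal64AddSketch {
        // Both operands share the same scale, so the add is a plain long add per row.
        static void addScalar(long[] col, int n, long scalarScaled, long[] out) {
            for (int i = 0; i < n; i++) {
                out[i] = col[i] + scalarScaled;
            }
        }

        // "ConvertDecimal64ToDecimal": materialize full decimals when a downstream
        // operator cannot work on the scaled-long form.
        static BigDecimal[] toDecimal(long[] col, int n, int scale) {
            BigDecimal[] out = new BigDecimal[n];
            for (int i = 0; i < n; i++) {
                out[i] = BigDecimal.valueOf(col[i], scale);
            }
            return out;
        }

        public static void main(String[] args) {
            long[] attr = {100L, 250L};                    // 1.00 and 2.50 at scale 2
            long[] plusOne = new long[attr.length];
            addScalar(attr, attr.length, 100L, plusOne);   // attr + 1, as in the CASE branches above
            System.out.println(toDecimal(plusOne, plusOne.length, 2)[0]); // prints 2.00
        }
    }
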
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out b/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
index b22b5ac..8f5ce87 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_casts.q.out
@@ -200,8 +200,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_context.q.out b/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
index 3edc12b..778ba26 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_context.q.out
@@ -163,8 +163,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -191,8 +191,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -220,8 +220,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out b/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
index b29fd4b..9be5235 100644
--- a/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_case_when_2.q.out
@@ -426,8 +426,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
@@ -711,8 +711,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
index bbaa05c..f801856 100644
--- a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
@@ -171,8 +171,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
index be7c367..73e8060 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_2.q.out
@@ -132,8 +132,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -328,8 +328,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
index ca4acf1..a418e7a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_4.q.out
@@ -174,8 +174,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out b/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
index 72cd1d3..7aa82d0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_mapjoin1.q.out
@@ -195,8 +195,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -238,8 +238,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -349,8 +349,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -405,8 +405,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -519,8 +519,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -575,8 +575,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
index 696359b..c283674 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_simple.q.out
@@ -75,8 +75,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -161,8 +161,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -259,8 +259,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out b/ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out
index 1a32227..5a23539 100644
--- a/ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_char_varchar_1.q.out
@@ -83,8 +83,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -231,8 +230,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
index 339df62..bc00c98 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce.q.out
@@ -46,8 +46,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -154,8 +154,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -262,8 +262,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -346,8 +346,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -449,8 +449,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -531,8 +531,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
index 451a6e1..f43c9ea 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce_2.q.out
@@ -99,8 +99,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -223,8 +223,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -324,8 +324,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -448,8 +448,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_coalesce_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce_3.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce_3.q.out
index d8b2ced..d05dd70 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce_3.q.out
@@ -135,8 +135,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -180,8 +180,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_coalesce_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_coalesce_4.q.out b/ql/src/test/results/clientpositive/llap/vector_coalesce_4.q.out
index 409c68c..e609d14 100644
--- a/ql/src/test/results/clientpositive/llap/vector_coalesce_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_coalesce_4.q.out
@@ -81,8 +81,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
index 87525fe..d5ea64f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
@@ -117,8 +117,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -199,8 +199,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -281,8 +281,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -363,8 +363,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -445,8 +445,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -527,8 +527,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -611,8 +611,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -959,8 +959,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1185,8 +1185,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1328,8 +1328,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out b/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
index 589e807..c4f59f0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_complex_join.q.out
@@ -91,8 +91,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -134,8 +134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -270,8 +270,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -314,8 +314,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -413,8 +413,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -457,8 +457,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_count.q.out b/ql/src/test/results/clientpositive/llap/vector_count.q.out
index ce35eb8..20a655b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_count.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_count.q.out
@@ -271,8 +271,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -366,8 +366,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
index 42fcdbd..dd54bd5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_count_distinct.q.out
@@ -1289,8 +1289,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_create_struct_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_create_struct_table.q.out b/ql/src/test/results/clientpositive/llap/vector_create_struct_table.q.out
index c08154e..f312244 100644
--- a/ql/src/test/results/clientpositive/llap/vector_create_struct_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_create_struct_table.q.out
@@ -79,8 +79,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -191,8 +190,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -303,8 +301,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_data_types.q.out b/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
index a709210..a1d18cd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_data_types.q.out
@@ -260,8 +260,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -399,8 +399,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_date_1.q.out b/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
index 2a77c39..9d96979 100644
--- a/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
@@ -139,8 +139,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -304,8 +304,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -469,8 +469,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -634,8 +634,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -803,8 +803,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -970,8 +970,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1081,8 +1081,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_1.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_1.q.out
index 05c43fb..b0e5787 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_1.q.out
@@ -72,22 +72,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToBoolean(t) (type: boolean)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToBoolean(col 0:decimal(4,2)) -> 4:boolean
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToBoolean(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:boolean
Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: boolean)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -97,8 +97,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -106,9 +106,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -189,22 +189,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToByte(t) (type: tinyint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:tinyint
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:tinyint
Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: tinyint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -214,8 +214,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -223,9 +223,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -306,22 +306,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToShort(t) (type: smallint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:smallint
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:smallint
Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: smallint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -331,8 +331,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -340,9 +340,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -423,22 +423,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToInteger(t) (type: int)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:int
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:int
Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -448,8 +448,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -457,9 +457,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -540,22 +540,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToLong(t) (type: bigint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:bigint
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:bigint
Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: bigint)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -565,8 +565,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -574,9 +574,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -657,22 +657,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToFloat(t) (type: float)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToFloat(col 0:decimal(4,2)) -> 4:float
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToFloat(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:float
Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: float)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -682,8 +682,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -691,9 +691,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(4,2), double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -774,22 +774,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToDouble(t) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToDouble(col 0:decimal(4,2)) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToDouble(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:double
Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: double)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -799,8 +799,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -808,9 +808,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(4,2), double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -891,22 +891,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToString(t) (type: string)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToString(col 0:decimal(4,2)) -> 4:string
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToString(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:string
Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -916,8 +916,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -925,9 +925,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [string]
+ scratchColumnTypeNames: [decimal(4,2), string]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -1008,22 +1008,22 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CAST( t AS TIMESTAMP) (type: timestamp)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToTimestamp(col 0:decimal(4,2)) -> 4:timestamp
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToTimestamp(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:timestamp
Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: timestamp)
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4]
+ keyColumnNums: [5]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -1033,8 +1033,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1042,9 +1042,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [timestamp]
+ scratchColumnTypeNames: [decimal(4,2), timestamp]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_10_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_10_0.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_10_0.q.out
index 5e835cd..3170625 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_10_0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_10_0.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0))
outputColumnNames: _col0
@@ -86,8 +86,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -95,7 +95,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -178,7 +178,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0))
outputColumnNames: _col0
@@ -203,8 +203,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -212,7 +211,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
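For context on the plan diffs above: DECIMAL_64 is the ORC/vectorization feature support flag that lets decimal columns of precision 18 or less be carried through vectorized operators as scaled 64-bit longs instead of full HiveDecimal objects; expressions that have no Decimal64-aware implementation first widen the value, which is why the updated explain output shows ConvertDecimal64ToDecimal children and extra decimal(4,2) scratch columns. A minimal sketch of that scaled-long representation, assuming only standard java.math and not taken from Hive source:

import java.math.BigDecimal;
import java.math.RoundingMode;

public final class Decimal64Sketch {
    // precision is capped at 18 so every value fits in a signed 64-bit long
    static final int MAX_DECIMAL_64_PRECISION = 18;

    // value = unscaled / 10^scale, e.g. 12.34 at scale 2 encodes as 1234L
    static long toDecimal64(BigDecimal d, int scale) {
        return d.setScale(scale, RoundingMode.UNNECESSARY).unscaledValue().longValueExact();
    }

    // widen back to an arbitrary-precision decimal (conceptually what
    // ConvertDecimal64ToDecimal does when an expression needs HiveDecimal input)
    static BigDecimal toBigDecimal(long unscaled, int scale) {
        return BigDecimal.valueOf(unscaled, scale);
    }

    public static void main(String[] args) {
        long enc = toDecimal64(new BigDecimal("12.34"), 2);
        System.out.println(enc + " -> " + toBigDecimal(enc, 2)); // prints 1234 -> 12.34
    }
}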
[56/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index ec26cca..38895e3 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -714,14 +714,12 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
*/
public function alter_partitions($db_name, $tbl_name, array $new_parts);
/**
- * @param string $db_name
- * @param string $tbl_name
- * @param \metastore\Partition[] $new_parts
- * @param \metastore\EnvironmentContext $environment_context
+ * @param \metastore\AlterPartitionsRequest $req
+ * @return \metastore\AlterPartitionsResponse
* @throws \metastore\InvalidOperationException
* @throws \metastore\MetaException
*/
- public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context);
+ public function alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest $req);
/**
* @param string $db_name
* @param string $tbl_name
@@ -6394,19 +6392,16 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
return;
}
- public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context)
+ public function alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest $req)
{
- $this->send_alter_partitions_with_environment_context($db_name, $tbl_name, $new_parts, $environment_context);
- $this->recv_alter_partitions_with_environment_context();
+ $this->send_alter_partitions_with_environment_context($req);
+ return $this->recv_alter_partitions_with_environment_context();
}
- public function send_alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context)
+ public function send_alter_partitions_with_environment_context(\metastore\AlterPartitionsRequest $req)
{
$args = new \metastore\ThriftHiveMetastore_alter_partitions_with_environment_context_args();
- $args->db_name = $db_name;
- $args->tbl_name = $tbl_name;
- $args->new_parts = $new_parts;
- $args->environment_context = $environment_context;
+ $args->req = $req;
$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
if ($bin_accel)
{
@@ -6442,13 +6437,16 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
$result->read($this->input_);
$this->input_->readMessageEnd();
}
+ if ($result->success !== null) {
+ return $result->success;
+ }
if ($result->o1 !== null) {
throw $result->o1;
}
if ($result->o2 !== null) {
throw $result->o2;
}
- return;
+ throw new \Exception("alter_partitions_with_environment_context failed: unknown result");
}
public function alter_partition_with_environment_context($db_name, $tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context)
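The signature change in the hunks above follows the usual Thrift evolution pattern: the positional arguments (db name, table name, partition list, environment context) are folded into a single AlterPartitionsRequest struct so that new fields can be added later without changing the wire signature again, and an AlterPartitionsResponse replaces the void return. A rough Java-flavoured sketch of the same pattern; only the request/response names come from the PHP diff, all field and method names below are illustrative assumptions rather than the actual generated metastore API:

import java.util.ArrayList;
import java.util.List;

// Stand-ins for the Thrift-generated request/response structs; field names are
// illustrative only and are not taken from the diff.
class AlterPartitionsRequestSketch {
    String dbName;
    String tblName;
    List<String> newParts = new ArrayList<>(); // stand-in for List<Partition>
    String environmentContext;                 // stand-in for EnvironmentContext
}

class AlterPartitionsResponseSketch { }

class MetastoreClientSketch {
    // old shape: one positional parameter per field, void return
    void alterPartitionsOld(String db, String tbl, List<String> parts, String ctx) { }

    // new shape mirrored from the PHP diff: request struct in, response struct out
    AlterPartitionsResponseSketch alterPartitionsNew(AlterPartitionsRequestSketch req) {
        return new AlterPartitionsResponseSketch();
    }
}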
@@ -15440,14 +15438,14 @@ class ThriftHiveMetastore_get_databases_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size820 = 0;
- $_etype823 = 0;
- $xfer += $input->readListBegin($_etype823, $_size820);
- for ($_i824 = 0; $_i824 < $_size820; ++$_i824)
+ $_size827 = 0;
+ $_etype830 = 0;
+ $xfer += $input->readListBegin($_etype830, $_size827);
+ for ($_i831 = 0; $_i831 < $_size827; ++$_i831)
{
- $elem825 = null;
- $xfer += $input->readString($elem825);
- $this->success []= $elem825;
+ $elem832 = null;
+ $xfer += $input->readString($elem832);
+ $this->success []= $elem832;
}
$xfer += $input->readListEnd();
} else {
@@ -15483,9 +15481,9 @@ class ThriftHiveMetastore_get_databases_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter826)
+ foreach ($this->success as $iter833)
{
- $xfer += $output->writeString($iter826);
+ $xfer += $output->writeString($iter833);
}
}
$output->writeListEnd();
@@ -15616,14 +15614,14 @@ class ThriftHiveMetastore_get_all_databases_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size827 = 0;
- $_etype830 = 0;
- $xfer += $input->readListBegin($_etype830, $_size827);
- for ($_i831 = 0; $_i831 < $_size827; ++$_i831)
+ $_size834 = 0;
+ $_etype837 = 0;
+ $xfer += $input->readListBegin($_etype837, $_size834);
+ for ($_i838 = 0; $_i838 < $_size834; ++$_i838)
{
- $elem832 = null;
- $xfer += $input->readString($elem832);
- $this->success []= $elem832;
+ $elem839 = null;
+ $xfer += $input->readString($elem839);
+ $this->success []= $elem839;
}
$xfer += $input->readListEnd();
} else {
@@ -15659,9 +15657,9 @@ class ThriftHiveMetastore_get_all_databases_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter833)
+ foreach ($this->success as $iter840)
{
- $xfer += $output->writeString($iter833);
+ $xfer += $output->writeString($iter840);
}
}
$output->writeListEnd();
@@ -16662,18 +16660,18 @@ class ThriftHiveMetastore_get_type_all_result {
case 0:
if ($ftype == TType::MAP) {
$this->success = array();
- $_size834 = 0;
- $_ktype835 = 0;
- $_vtype836 = 0;
- $xfer += $input->readMapBegin($_ktype835, $_vtype836, $_size834);
- for ($_i838 = 0; $_i838 < $_size834; ++$_i838)
+ $_size841 = 0;
+ $_ktype842 = 0;
+ $_vtype843 = 0;
+ $xfer += $input->readMapBegin($_ktype842, $_vtype843, $_size841);
+ for ($_i845 = 0; $_i845 < $_size841; ++$_i845)
{
- $key839 = '';
- $val840 = new \metastore\Type();
- $xfer += $input->readString($key839);
- $val840 = new \metastore\Type();
- $xfer += $val840->read($input);
- $this->success[$key839] = $val840;
+ $key846 = '';
+ $val847 = new \metastore\Type();
+ $xfer += $input->readString($key846);
+ $val847 = new \metastore\Type();
+ $xfer += $val847->read($input);
+ $this->success[$key846] = $val847;
}
$xfer += $input->readMapEnd();
} else {
@@ -16709,10 +16707,10 @@ class ThriftHiveMetastore_get_type_all_result {
{
$output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
{
- foreach ($this->success as $kiter841 => $viter842)
+ foreach ($this->success as $kiter848 => $viter849)
{
- $xfer += $output->writeString($kiter841);
- $xfer += $viter842->write($output);
+ $xfer += $output->writeString($kiter848);
+ $xfer += $viter849->write($output);
}
}
$output->writeMapEnd();
@@ -16916,15 +16914,15 @@ class ThriftHiveMetastore_get_fields_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size843 = 0;
- $_etype846 = 0;
- $xfer += $input->readListBegin($_etype846, $_size843);
- for ($_i847 = 0; $_i847 < $_size843; ++$_i847)
+ $_size850 = 0;
+ $_etype853 = 0;
+ $xfer += $input->readListBegin($_etype853, $_size850);
+ for ($_i854 = 0; $_i854 < $_size850; ++$_i854)
{
- $elem848 = null;
- $elem848 = new \metastore\FieldSchema();
- $xfer += $elem848->read($input);
- $this->success []= $elem848;
+ $elem855 = null;
+ $elem855 = new \metastore\FieldSchema();
+ $xfer += $elem855->read($input);
+ $this->success []= $elem855;
}
$xfer += $input->readListEnd();
} else {
@@ -16976,9 +16974,9 @@ class ThriftHiveMetastore_get_fields_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter849)
+ foreach ($this->success as $iter856)
{
- $xfer += $iter849->write($output);
+ $xfer += $iter856->write($output);
}
}
$output->writeListEnd();
@@ -17220,15 +17218,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size850 = 0;
- $_etype853 = 0;
- $xfer += $input->readListBegin($_etype853, $_size850);
- for ($_i854 = 0; $_i854 < $_size850; ++$_i854)
+ $_size857 = 0;
+ $_etype860 = 0;
+ $xfer += $input->readListBegin($_etype860, $_size857);
+ for ($_i861 = 0; $_i861 < $_size857; ++$_i861)
{
- $elem855 = null;
- $elem855 = new \metastore\FieldSchema();
- $xfer += $elem855->read($input);
- $this->success []= $elem855;
+ $elem862 = null;
+ $elem862 = new \metastore\FieldSchema();
+ $xfer += $elem862->read($input);
+ $this->success []= $elem862;
}
$xfer += $input->readListEnd();
} else {
@@ -17280,9 +17278,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter856)
+ foreach ($this->success as $iter863)
{
- $xfer += $iter856->write($output);
+ $xfer += $iter863->write($output);
}
}
$output->writeListEnd();
@@ -17496,15 +17494,15 @@ class ThriftHiveMetastore_get_schema_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size857 = 0;
- $_etype860 = 0;
- $xfer += $input->readListBegin($_etype860, $_size857);
- for ($_i861 = 0; $_i861 < $_size857; ++$_i861)
+ $_size864 = 0;
+ $_etype867 = 0;
+ $xfer += $input->readListBegin($_etype867, $_size864);
+ for ($_i868 = 0; $_i868 < $_size864; ++$_i868)
{
- $elem862 = null;
- $elem862 = new \metastore\FieldSchema();
- $xfer += $elem862->read($input);
- $this->success []= $elem862;
+ $elem869 = null;
+ $elem869 = new \metastore\FieldSchema();
+ $xfer += $elem869->read($input);
+ $this->success []= $elem869;
}
$xfer += $input->readListEnd();
} else {
@@ -17556,9 +17554,9 @@ class ThriftHiveMetastore_get_schema_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter863)
+ foreach ($this->success as $iter870)
{
- $xfer += $iter863->write($output);
+ $xfer += $iter870->write($output);
}
}
$output->writeListEnd();
@@ -17800,15 +17798,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size864 = 0;
- $_etype867 = 0;
- $xfer += $input->readListBegin($_etype867, $_size864);
- for ($_i868 = 0; $_i868 < $_size864; ++$_i868)
+ $_size871 = 0;
+ $_etype874 = 0;
+ $xfer += $input->readListBegin($_etype874, $_size871);
+ for ($_i875 = 0; $_i875 < $_size871; ++$_i875)
{
- $elem869 = null;
- $elem869 = new \metastore\FieldSchema();
- $xfer += $elem869->read($input);
- $this->success []= $elem869;
+ $elem876 = null;
+ $elem876 = new \metastore\FieldSchema();
+ $xfer += $elem876->read($input);
+ $this->success []= $elem876;
}
$xfer += $input->readListEnd();
} else {
@@ -17860,9 +17858,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter870)
+ foreach ($this->success as $iter877)
{
- $xfer += $iter870->write($output);
+ $xfer += $iter877->write($output);
}
}
$output->writeListEnd();
@@ -18534,15 +18532,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
case 2:
if ($ftype == TType::LST) {
$this->primaryKeys = array();
- $_size871 = 0;
- $_etype874 = 0;
- $xfer += $input->readListBegin($_etype874, $_size871);
- for ($_i875 = 0; $_i875 < $_size871; ++$_i875)
+ $_size878 = 0;
+ $_etype881 = 0;
+ $xfer += $input->readListBegin($_etype881, $_size878);
+ for ($_i882 = 0; $_i882 < $_size878; ++$_i882)
{
- $elem876 = null;
- $elem876 = new \metastore\SQLPrimaryKey();
- $xfer += $elem876->read($input);
- $this->primaryKeys []= $elem876;
+ $elem883 = null;
+ $elem883 = new \metastore\SQLPrimaryKey();
+ $xfer += $elem883->read($input);
+ $this->primaryKeys []= $elem883;
}
$xfer += $input->readListEnd();
} else {
@@ -18552,15 +18550,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
case 3:
if ($ftype == TType::LST) {
$this->foreignKeys = array();
- $_size877 = 0;
- $_etype880 = 0;
- $xfer += $input->readListBegin($_etype880, $_size877);
- for ($_i881 = 0; $_i881 < $_size877; ++$_i881)
+ $_size884 = 0;
+ $_etype887 = 0;
+ $xfer += $input->readListBegin($_etype887, $_size884);
+ for ($_i888 = 0; $_i888 < $_size884; ++$_i888)
{
- $elem882 = null;
- $elem882 = new \metastore\SQLForeignKey();
- $xfer += $elem882->read($input);
- $this->foreignKeys []= $elem882;
+ $elem889 = null;
+ $elem889 = new \metastore\SQLForeignKey();
+ $xfer += $elem889->read($input);
+ $this->foreignKeys []= $elem889;
}
$xfer += $input->readListEnd();
} else {
@@ -18570,15 +18568,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
case 4:
if ($ftype == TType::LST) {
$this->uniqueConstraints = array();
- $_size883 = 0;
- $_etype886 = 0;
- $xfer += $input->readListBegin($_etype886, $_size883);
- for ($_i887 = 0; $_i887 < $_size883; ++$_i887)
+ $_size890 = 0;
+ $_etype893 = 0;
+ $xfer += $input->readListBegin($_etype893, $_size890);
+ for ($_i894 = 0; $_i894 < $_size890; ++$_i894)
{
- $elem888 = null;
- $elem888 = new \metastore\SQLUniqueConstraint();
- $xfer += $elem888->read($input);
- $this->uniqueConstraints []= $elem888;
+ $elem895 = null;
+ $elem895 = new \metastore\SQLUniqueConstraint();
+ $xfer += $elem895->read($input);
+ $this->uniqueConstraints []= $elem895;
}
$xfer += $input->readListEnd();
} else {
@@ -18588,15 +18586,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
case 5:
if ($ftype == TType::LST) {
$this->notNullConstraints = array();
- $_size889 = 0;
- $_etype892 = 0;
- $xfer += $input->readListBegin($_etype892, $_size889);
- for ($_i893 = 0; $_i893 < $_size889; ++$_i893)
+ $_size896 = 0;
+ $_etype899 = 0;
+ $xfer += $input->readListBegin($_etype899, $_size896);
+ for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
{
- $elem894 = null;
- $elem894 = new \metastore\SQLNotNullConstraint();
- $xfer += $elem894->read($input);
- $this->notNullConstraints []= $elem894;
+ $elem901 = null;
+ $elem901 = new \metastore\SQLNotNullConstraint();
+ $xfer += $elem901->read($input);
+ $this->notNullConstraints []= $elem901;
}
$xfer += $input->readListEnd();
} else {
@@ -18606,15 +18604,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
case 6:
if ($ftype == TType::LST) {
$this->defaultConstraints = array();
- $_size895 = 0;
- $_etype898 = 0;
- $xfer += $input->readListBegin($_etype898, $_size895);
- for ($_i899 = 0; $_i899 < $_size895; ++$_i899)
+ $_size902 = 0;
+ $_etype905 = 0;
+ $xfer += $input->readListBegin($_etype905, $_size902);
+ for ($_i906 = 0; $_i906 < $_size902; ++$_i906)
{
- $elem900 = null;
- $elem900 = new \metastore\SQLDefaultConstraint();
- $xfer += $elem900->read($input);
- $this->defaultConstraints []= $elem900;
+ $elem907 = null;
+ $elem907 = new \metastore\SQLDefaultConstraint();
+ $xfer += $elem907->read($input);
+ $this->defaultConstraints []= $elem907;
}
$xfer += $input->readListEnd();
} else {
@@ -18624,15 +18622,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
case 7:
if ($ftype == TType::LST) {
$this->checkConstraints = array();
- $_size901 = 0;
- $_etype904 = 0;
- $xfer += $input->readListBegin($_etype904, $_size901);
- for ($_i905 = 0; $_i905 < $_size901; ++$_i905)
+ $_size908 = 0;
+ $_etype911 = 0;
+ $xfer += $input->readListBegin($_etype911, $_size908);
+ for ($_i912 = 0; $_i912 < $_size908; ++$_i912)
{
- $elem906 = null;
- $elem906 = new \metastore\SQLCheckConstraint();
- $xfer += $elem906->read($input);
- $this->checkConstraints []= $elem906;
+ $elem913 = null;
+ $elem913 = new \metastore\SQLCheckConstraint();
+ $xfer += $elem913->read($input);
+ $this->checkConstraints []= $elem913;
}
$xfer += $input->readListEnd();
} else {
@@ -18668,9 +18666,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
{
$output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
{
- foreach ($this->primaryKeys as $iter907)
+ foreach ($this->primaryKeys as $iter914)
{
- $xfer += $iter907->write($output);
+ $xfer += $iter914->write($output);
}
}
$output->writeListEnd();
@@ -18685,9 +18683,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
{
$output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
{
- foreach ($this->foreignKeys as $iter908)
+ foreach ($this->foreignKeys as $iter915)
{
- $xfer += $iter908->write($output);
+ $xfer += $iter915->write($output);
}
}
$output->writeListEnd();
@@ -18702,9 +18700,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
{
$output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints));
{
- foreach ($this->uniqueConstraints as $iter909)
+ foreach ($this->uniqueConstraints as $iter916)
{
- $xfer += $iter909->write($output);
+ $xfer += $iter916->write($output);
}
}
$output->writeListEnd();
@@ -18719,9 +18717,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
{
$output->writeListBegin(TType::STRUCT, count($this->notNullConstraints));
{
- foreach ($this->notNullConstraints as $iter910)
+ foreach ($this->notNullConstraints as $iter917)
{
- $xfer += $iter910->write($output);
+ $xfer += $iter917->write($output);
}
}
$output->writeListEnd();
@@ -18736,9 +18734,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
{
$output->writeListBegin(TType::STRUCT, count($this->defaultConstraints));
{
- foreach ($this->defaultConstraints as $iter911)
+ foreach ($this->defaultConstraints as $iter918)
{
- $xfer += $iter911->write($output);
+ $xfer += $iter918->write($output);
}
}
$output->writeListEnd();
@@ -18753,9 +18751,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
{
$output->writeListBegin(TType::STRUCT, count($this->checkConstraints));
{
- foreach ($this->checkConstraints as $iter912)
+ foreach ($this->checkConstraints as $iter919)
{
- $xfer += $iter912->write($output);
+ $xfer += $iter919->write($output);
}
}
$output->writeListEnd();
@@ -20755,14 +20753,14 @@ class ThriftHiveMetastore_truncate_table_args {
case 3:
if ($ftype == TType::LST) {
$this->partNames = array();
- $_size913 = 0;
- $_etype916 = 0;
- $xfer += $input->readListBegin($_etype916, $_size913);
- for ($_i917 = 0; $_i917 < $_size913; ++$_i917)
+ $_size920 = 0;
+ $_etype923 = 0;
+ $xfer += $input->readListBegin($_etype923, $_size920);
+ for ($_i924 = 0; $_i924 < $_size920; ++$_i924)
{
- $elem918 = null;
- $xfer += $input->readString($elem918);
- $this->partNames []= $elem918;
+ $elem925 = null;
+ $xfer += $input->readString($elem925);
+ $this->partNames []= $elem925;
}
$xfer += $input->readListEnd();
} else {
@@ -20800,9 +20798,9 @@ class ThriftHiveMetastore_truncate_table_args {
{
$output->writeListBegin(TType::STRING, count($this->partNames));
{
- foreach ($this->partNames as $iter919)
+ foreach ($this->partNames as $iter926)
{
- $xfer += $output->writeString($iter919);
+ $xfer += $output->writeString($iter926);
}
}
$output->writeListEnd();
@@ -21053,14 +21051,14 @@ class ThriftHiveMetastore_get_tables_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size920 = 0;
- $_etype923 = 0;
- $xfer += $input->readListBegin($_etype923, $_size920);
- for ($_i924 = 0; $_i924 < $_size920; ++$_i924)
+ $_size927 = 0;
+ $_etype930 = 0;
+ $xfer += $input->readListBegin($_etype930, $_size927);
+ for ($_i931 = 0; $_i931 < $_size927; ++$_i931)
{
- $elem925 = null;
- $xfer += $input->readString($elem925);
- $this->success []= $elem925;
+ $elem932 = null;
+ $xfer += $input->readString($elem932);
+ $this->success []= $elem932;
}
$xfer += $input->readListEnd();
} else {
@@ -21096,9 +21094,9 @@ class ThriftHiveMetastore_get_tables_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter926)
+ foreach ($this->success as $iter933)
{
- $xfer += $output->writeString($iter926);
+ $xfer += $output->writeString($iter933);
}
}
$output->writeListEnd();
@@ -21300,14 +21298,14 @@ class ThriftHiveMetastore_get_tables_by_type_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size927 = 0;
- $_etype930 = 0;
- $xfer += $input->readListBegin($_etype930, $_size927);
- for ($_i931 = 0; $_i931 < $_size927; ++$_i931)
+ $_size934 = 0;
+ $_etype937 = 0;
+ $xfer += $input->readListBegin($_etype937, $_size934);
+ for ($_i938 = 0; $_i938 < $_size934; ++$_i938)
{
- $elem932 = null;
- $xfer += $input->readString($elem932);
- $this->success []= $elem932;
+ $elem939 = null;
+ $xfer += $input->readString($elem939);
+ $this->success []= $elem939;
}
$xfer += $input->readListEnd();
} else {
@@ -21343,9 +21341,9 @@ class ThriftHiveMetastore_get_tables_by_type_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter933)
+ foreach ($this->success as $iter940)
{
- $xfer += $output->writeString($iter933);
+ $xfer += $output->writeString($iter940);
}
}
$output->writeListEnd();
@@ -21501,14 +21499,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size934 = 0;
- $_etype937 = 0;
- $xfer += $input->readListBegin($_etype937, $_size934);
- for ($_i938 = 0; $_i938 < $_size934; ++$_i938)
+ $_size941 = 0;
+ $_etype944 = 0;
+ $xfer += $input->readListBegin($_etype944, $_size941);
+ for ($_i945 = 0; $_i945 < $_size941; ++$_i945)
{
- $elem939 = null;
- $xfer += $input->readString($elem939);
- $this->success []= $elem939;
+ $elem946 = null;
+ $xfer += $input->readString($elem946);
+ $this->success []= $elem946;
}
$xfer += $input->readListEnd();
} else {
@@ -21544,9 +21542,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter940)
+ foreach ($this->success as $iter947)
{
- $xfer += $output->writeString($iter940);
+ $xfer += $output->writeString($iter947);
}
}
$output->writeListEnd();
@@ -21651,14 +21649,14 @@ class ThriftHiveMetastore_get_table_meta_args {
case 3:
if ($ftype == TType::LST) {
$this->tbl_types = array();
- $_size941 = 0;
- $_etype944 = 0;
- $xfer += $input->readListBegin($_etype944, $_size941);
- for ($_i945 = 0; $_i945 < $_size941; ++$_i945)
+ $_size948 = 0;
+ $_etype951 = 0;
+ $xfer += $input->readListBegin($_etype951, $_size948);
+ for ($_i952 = 0; $_i952 < $_size948; ++$_i952)
{
- $elem946 = null;
- $xfer += $input->readString($elem946);
- $this->tbl_types []= $elem946;
+ $elem953 = null;
+ $xfer += $input->readString($elem953);
+ $this->tbl_types []= $elem953;
}
$xfer += $input->readListEnd();
} else {
@@ -21696,9 +21694,9 @@ class ThriftHiveMetastore_get_table_meta_args {
{
$output->writeListBegin(TType::STRING, count($this->tbl_types));
{
- foreach ($this->tbl_types as $iter947)
+ foreach ($this->tbl_types as $iter954)
{
- $xfer += $output->writeString($iter947);
+ $xfer += $output->writeString($iter954);
}
}
$output->writeListEnd();
@@ -21775,15 +21773,15 @@ class ThriftHiveMetastore_get_table_meta_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size948 = 0;
- $_etype951 = 0;
- $xfer += $input->readListBegin($_etype951, $_size948);
- for ($_i952 = 0; $_i952 < $_size948; ++$_i952)
+ $_size955 = 0;
+ $_etype958 = 0;
+ $xfer += $input->readListBegin($_etype958, $_size955);
+ for ($_i959 = 0; $_i959 < $_size955; ++$_i959)
{
- $elem953 = null;
- $elem953 = new \metastore\TableMeta();
- $xfer += $elem953->read($input);
- $this->success []= $elem953;
+ $elem960 = null;
+ $elem960 = new \metastore\TableMeta();
+ $xfer += $elem960->read($input);
+ $this->success []= $elem960;
}
$xfer += $input->readListEnd();
} else {
@@ -21819,9 +21817,9 @@ class ThriftHiveMetastore_get_table_meta_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter954)
+ foreach ($this->success as $iter961)
{
- $xfer += $iter954->write($output);
+ $xfer += $iter961->write($output);
}
}
$output->writeListEnd();
@@ -21977,14 +21975,14 @@ class ThriftHiveMetastore_get_all_tables_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size955 = 0;
- $_etype958 = 0;
- $xfer += $input->readListBegin($_etype958, $_size955);
- for ($_i959 = 0; $_i959 < $_size955; ++$_i959)
+ $_size962 = 0;
+ $_etype965 = 0;
+ $xfer += $input->readListBegin($_etype965, $_size962);
+ for ($_i966 = 0; $_i966 < $_size962; ++$_i966)
{
- $elem960 = null;
- $xfer += $input->readString($elem960);
- $this->success []= $elem960;
+ $elem967 = null;
+ $xfer += $input->readString($elem967);
+ $this->success []= $elem967;
}
$xfer += $input->readListEnd();
} else {
@@ -22020,9 +22018,9 @@ class ThriftHiveMetastore_get_all_tables_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter961)
+ foreach ($this->success as $iter968)
{
- $xfer += $output->writeString($iter961);
+ $xfer += $output->writeString($iter968);
}
}
$output->writeListEnd();
@@ -22337,14 +22335,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
case 2:
if ($ftype == TType::LST) {
$this->tbl_names = array();
- $_size962 = 0;
- $_etype965 = 0;
- $xfer += $input->readListBegin($_etype965, $_size962);
- for ($_i966 = 0; $_i966 < $_size962; ++$_i966)
+ $_size969 = 0;
+ $_etype972 = 0;
+ $xfer += $input->readListBegin($_etype972, $_size969);
+ for ($_i973 = 0; $_i973 < $_size969; ++$_i973)
{
- $elem967 = null;
- $xfer += $input->readString($elem967);
- $this->tbl_names []= $elem967;
+ $elem974 = null;
+ $xfer += $input->readString($elem974);
+ $this->tbl_names []= $elem974;
}
$xfer += $input->readListEnd();
} else {
@@ -22377,9 +22375,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
{
$output->writeListBegin(TType::STRING, count($this->tbl_names));
{
- foreach ($this->tbl_names as $iter968)
+ foreach ($this->tbl_names as $iter975)
{
- $xfer += $output->writeString($iter968);
+ $xfer += $output->writeString($iter975);
}
}
$output->writeListEnd();
@@ -22444,15 +22442,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size969 = 0;
- $_etype972 = 0;
- $xfer += $input->readListBegin($_etype972, $_size969);
- for ($_i973 = 0; $_i973 < $_size969; ++$_i973)
+ $_size976 = 0;
+ $_etype979 = 0;
+ $xfer += $input->readListBegin($_etype979, $_size976);
+ for ($_i980 = 0; $_i980 < $_size976; ++$_i980)
{
- $elem974 = null;
- $elem974 = new \metastore\Table();
- $xfer += $elem974->read($input);
- $this->success []= $elem974;
+ $elem981 = null;
+ $elem981 = new \metastore\Table();
+ $xfer += $elem981->read($input);
+ $this->success []= $elem981;
}
$xfer += $input->readListEnd();
} else {
@@ -22480,9 +22478,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter975)
+ foreach ($this->success as $iter982)
{
- $xfer += $iter975->write($output);
+ $xfer += $iter982->write($output);
}
}
$output->writeListEnd();
@@ -23009,14 +23007,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args {
case 2:
if ($ftype == TType::LST) {
$this->tbl_names = array();
- $_size976 = 0;
- $_etype979 = 0;
- $xfer += $input->readListBegin($_etype979, $_size976);
- for ($_i980 = 0; $_i980 < $_size976; ++$_i980)
+ $_size983 = 0;
+ $_etype986 = 0;
+ $xfer += $input->readListBegin($_etype986, $_size983);
+ for ($_i987 = 0; $_i987 < $_size983; ++$_i987)
{
- $elem981 = null;
- $xfer += $input->readString($elem981);
- $this->tbl_names []= $elem981;
+ $elem988 = null;
+ $xfer += $input->readString($elem988);
+ $this->tbl_names []= $elem988;
}
$xfer += $input->readListEnd();
} else {
@@ -23049,9 +23047,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args {
{
$output->writeListBegin(TType::STRING, count($this->tbl_names));
{
- foreach ($this->tbl_names as $iter982)
+ foreach ($this->tbl_names as $iter989)
{
- $xfer += $output->writeString($iter982);
+ $xfer += $output->writeString($iter989);
}
}
$output->writeListEnd();
@@ -23156,18 +23154,18 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
case 0:
if ($ftype == TType::MAP) {
$this->success = array();
- $_size983 = 0;
- $_ktype984 = 0;
- $_vtype985 = 0;
- $xfer += $input->readMapBegin($_ktype984, $_vtype985, $_size983);
- for ($_i987 = 0; $_i987 < $_size983; ++$_i987)
+ $_size990 = 0;
+ $_ktype991 = 0;
+ $_vtype992 = 0;
+ $xfer += $input->readMapBegin($_ktype991, $_vtype992, $_size990);
+ for ($_i994 = 0; $_i994 < $_size990; ++$_i994)
{
- $key988 = '';
- $val989 = new \metastore\Materialization();
- $xfer += $input->readString($key988);
- $val989 = new \metastore\Materialization();
- $xfer += $val989->read($input);
- $this->success[$key988] = $val989;
+ $key995 = '';
+ $val996 = new \metastore\Materialization();
+ $xfer += $input->readString($key995);
+ $val996 = new \metastore\Materialization();
+ $xfer += $val996->read($input);
+ $this->success[$key995] = $val996;
}
$xfer += $input->readMapEnd();
} else {
@@ -23219,10 +23217,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
{
$output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
{
- foreach ($this->success as $kiter990 => $viter991)
+ foreach ($this->success as $kiter997 => $viter998)
{
- $xfer += $output->writeString($kiter990);
- $xfer += $viter991->write($output);
+ $xfer += $output->writeString($kiter997);
+ $xfer += $viter998->write($output);
}
}
$output->writeMapEnd();
@@ -23734,14 +23732,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size992 = 0;
- $_etype995 = 0;
- $xfer += $input->readListBegin($_etype995, $_size992);
- for ($_i996 = 0; $_i996 < $_size992; ++$_i996)
+ $_size999 = 0;
+ $_etype1002 = 0;
+ $xfer += $input->readListBegin($_etype1002, $_size999);
+ for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003)
{
- $elem997 = null;
- $xfer += $input->readString($elem997);
- $this->success []= $elem997;
+ $elem1004 = null;
+ $xfer += $input->readString($elem1004);
+ $this->success []= $elem1004;
}
$xfer += $input->readListEnd();
} else {
@@ -23793,9 +23791,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter998)
+ foreach ($this->success as $iter1005)
{
- $xfer += $output->writeString($iter998);
+ $xfer += $output->writeString($iter1005);
}
}
$output->writeListEnd();
@@ -25108,15 +25106,15 @@ class ThriftHiveMetastore_add_partitions_args {
case 1:
if ($ftype == TType::LST) {
$this->new_parts = array();
- $_size999 = 0;
- $_etype1002 = 0;
- $xfer += $input->readListBegin($_etype1002, $_size999);
- for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003)
+ $_size1006 = 0;
+ $_etype1009 = 0;
+ $xfer += $input->readListBegin($_etype1009, $_size1006);
+ for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010)
{
- $elem1004 = null;
- $elem1004 = new \metastore\Partition();
- $xfer += $elem1004->read($input);
- $this->new_parts []= $elem1004;
+ $elem1011 = null;
+ $elem1011 = new \metastore\Partition();
+ $xfer += $elem1011->read($input);
+ $this->new_parts []= $elem1011;
}
$xfer += $input->readListEnd();
} else {
@@ -25144,9 +25142,9 @@ class ThriftHiveMetastore_add_partitions_args {
{
$output->writeListBegin(TType::STRUCT, count($this->new_parts));
{
- foreach ($this->new_parts as $iter1005)
+ foreach ($this->new_parts as $iter1012)
{
- $xfer += $iter1005->write($output);
+ $xfer += $iter1012->write($output);
}
}
$output->writeListEnd();
@@ -25361,15 +25359,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
case 1:
if ($ftype == TType::LST) {
$this->new_parts = array();
- $_size1006 = 0;
- $_etype1009 = 0;
- $xfer += $input->readListBegin($_etype1009, $_size1006);
- for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010)
+ $_size1013 = 0;
+ $_etype1016 = 0;
+ $xfer += $input->readListBegin($_etype1016, $_size1013);
+ for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017)
{
- $elem1011 = null;
- $elem1011 = new \metastore\PartitionSpec();
- $xfer += $elem1011->read($input);
- $this->new_parts []= $elem1011;
+ $elem1018 = null;
+ $elem1018 = new \metastore\PartitionSpec();
+ $xfer += $elem1018->read($input);
+ $this->new_parts []= $elem1018;
}
$xfer += $input->readListEnd();
} else {
@@ -25397,9 +25395,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
{
$output->writeListBegin(TType::STRUCT, count($this->new_parts));
{
- foreach ($this->new_parts as $iter1012)
+ foreach ($this->new_parts as $iter1019)
{
- $xfer += $iter1012->write($output);
+ $xfer += $iter1019->write($output);
}
}
$output->writeListEnd();
@@ -25649,14 +25647,14 @@ class ThriftHiveMetastore_append_partition_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1013 = 0;
- $_etype1016 = 0;
- $xfer += $input->readListBegin($_etype1016, $_size1013);
- for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017)
+ $_size1020 = 0;
+ $_etype1023 = 0;
+ $xfer += $input->readListBegin($_etype1023, $_size1020);
+ for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024)
{
- $elem1018 = null;
- $xfer += $input->readString($elem1018);
- $this->part_vals []= $elem1018;
+ $elem1025 = null;
+ $xfer += $input->readString($elem1025);
+ $this->part_vals []= $elem1025;
}
$xfer += $input->readListEnd();
} else {
@@ -25694,9 +25692,9 @@ class ThriftHiveMetastore_append_partition_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1019)
+ foreach ($this->part_vals as $iter1026)
{
- $xfer += $output->writeString($iter1019);
+ $xfer += $output->writeString($iter1026);
}
}
$output->writeListEnd();
@@ -26198,14 +26196,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1020 = 0;
- $_etype1023 = 0;
- $xfer += $input->readListBegin($_etype1023, $_size1020);
- for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024)
+ $_size1027 = 0;
+ $_etype1030 = 0;
+ $xfer += $input->readListBegin($_etype1030, $_size1027);
+ for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031)
{
- $elem1025 = null;
- $xfer += $input->readString($elem1025);
- $this->part_vals []= $elem1025;
+ $elem1032 = null;
+ $xfer += $input->readString($elem1032);
+ $this->part_vals []= $elem1032;
}
$xfer += $input->readListEnd();
} else {
@@ -26251,9 +26249,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1026)
+ foreach ($this->part_vals as $iter1033)
{
- $xfer += $output->writeString($iter1026);
+ $xfer += $output->writeString($iter1033);
}
}
$output->writeListEnd();
@@ -27107,14 +27105,14 @@ class ThriftHiveMetastore_drop_partition_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1027 = 0;
- $_etype1030 = 0;
- $xfer += $input->readListBegin($_etype1030, $_size1027);
- for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031)
+ $_size1034 = 0;
+ $_etype1037 = 0;
+ $xfer += $input->readListBegin($_etype1037, $_size1034);
+ for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038)
{
- $elem1032 = null;
- $xfer += $input->readString($elem1032);
- $this->part_vals []= $elem1032;
+ $elem1039 = null;
+ $xfer += $input->readString($elem1039);
+ $this->part_vals []= $elem1039;
}
$xfer += $input->readListEnd();
} else {
@@ -27159,9 +27157,9 @@ class ThriftHiveMetastore_drop_partition_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1033)
+ foreach ($this->part_vals as $iter1040)
{
- $xfer += $output->writeString($iter1033);
+ $xfer += $output->writeString($iter1040);
}
}
$output->writeListEnd();
@@ -27414,14 +27412,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1034 = 0;
- $_etype1037 = 0;
- $xfer += $input->readListBegin($_etype1037, $_size1034);
- for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038)
+ $_size1041 = 0;
+ $_etype1044 = 0;
+ $xfer += $input->readListBegin($_etype1044, $_size1041);
+ for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045)
{
- $elem1039 = null;
- $xfer += $input->readString($elem1039);
- $this->part_vals []= $elem1039;
+ $elem1046 = null;
+ $xfer += $input->readString($elem1046);
+ $this->part_vals []= $elem1046;
}
$xfer += $input->readListEnd();
} else {
@@ -27474,9 +27472,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1040)
+ foreach ($this->part_vals as $iter1047)
{
- $xfer += $output->writeString($iter1040);
+ $xfer += $output->writeString($iter1047);
}
}
$output->writeListEnd();
@@ -28490,14 +28488,14 @@ class ThriftHiveMetastore_get_partition_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1041 = 0;
- $_etype1044 = 0;
- $xfer += $input->readListBegin($_etype1044, $_size1041);
- for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045)
+ $_size1048 = 0;
+ $_etype1051 = 0;
+ $xfer += $input->readListBegin($_etype1051, $_size1048);
+ for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052)
{
- $elem1046 = null;
- $xfer += $input->readString($elem1046);
- $this->part_vals []= $elem1046;
+ $elem1053 = null;
+ $xfer += $input->readString($elem1053);
+ $this->part_vals []= $elem1053;
}
$xfer += $input->readListEnd();
} else {
@@ -28535,9 +28533,9 @@ class ThriftHiveMetastore_get_partition_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1047)
+ foreach ($this->part_vals as $iter1054)
{
- $xfer += $output->writeString($iter1047);
+ $xfer += $output->writeString($iter1054);
}
}
$output->writeListEnd();
@@ -28779,17 +28777,17 @@ class ThriftHiveMetastore_exchange_partition_args {
case 1:
if ($ftype == TType::MAP) {
$this->partitionSpecs = array();
- $_size1048 = 0;
- $_ktype1049 = 0;
- $_vtype1050 = 0;
- $xfer += $input->readMapBegin($_ktype1049, $_vtype1050, $_size1048);
- for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052)
+ $_size1055 = 0;
+ $_ktype1056 = 0;
+ $_vtype1057 = 0;
+ $xfer += $input->readMapBegin($_ktype1056, $_vtype1057, $_size1055);
+ for ($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059)
{
- $key1053 = '';
- $val1054 = '';
- $xfer += $input->readString($key1053);
- $xfer += $input->readString($val1054);
- $this->partitionSpecs[$key1053] = $val1054;
+ $key1060 = '';
+ $val1061 = '';
+ $xfer += $input->readString($key1060);
+ $xfer += $input->readString($val1061);
+ $this->partitionSpecs[$key1060] = $val1061;
}
$xfer += $input->readMapEnd();
} else {
@@ -28845,10 +28843,10 @@ class ThriftHiveMetastore_exchange_partition_args {
{
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
{
- foreach ($this->partitionSpecs as $kiter1055 => $viter1056)
+ foreach ($this->partitionSpecs as $kiter1062 => $viter1063)
{
- $xfer += $output->writeString($kiter1055);
- $xfer += $output->writeString($viter1056);
+ $xfer += $output->writeString($kiter1062);
+ $xfer += $output->writeString($viter1063);
}
}
$output->writeMapEnd();
@@ -29160,17 +29158,17 @@ class ThriftHiveMetastore_exchange_partitions_args {
case 1:
if ($ftype == TType::MAP) {
$this->partitionSpecs = array();
- $_size1057 = 0;
- $_ktype1058 = 0;
- $_vtype1059 = 0;
- $xfer += $input->readMapBegin($_ktype1058, $_vtype1059, $_size1057);
- for ($_i1061 = 0; $_i1061 < $_size1057; ++$_i1061)
+ $_size1064 = 0;
+ $_ktype1065 = 0;
+ $_vtype1066 = 0;
+ $xfer += $input->readMapBegin($_ktype1065, $_vtype1066, $_size1064);
+ for ($_i1068 = 0; $_i1068 < $_size1064; ++$_i1068)
{
- $key1062 = '';
- $val1063 = '';
- $xfer += $input->readString($key1062);
- $xfer += $input->readString($val1063);
- $this->partitionSpecs[$key1062] = $val1063;
+ $key1069 = '';
+ $val1070 = '';
+ $xfer += $input->readString($key1069);
+ $xfer += $input->readString($val1070);
+ $this->partitionSpecs[$key1069] = $val1070;
}
$xfer += $input->readMapEnd();
} else {
@@ -29226,10 +29224,10 @@ class ThriftHiveMetastore_exchange_partitions_args {
{
$output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
{
- foreach ($this->partitionSpecs as $kiter1064 => $viter1065)
+ foreach ($this->partitionSpecs as $kiter1071 => $viter1072)
{
- $xfer += $output->writeString($kiter1064);
- $xfer += $output->writeString($viter1065);
+ $xfer += $output->writeString($kiter1071);
+ $xfer += $output->writeString($viter1072);
}
}
$output->writeMapEnd();
@@ -29362,15 +29360,15 @@ class ThriftHiveMetastore_exchange_partitions_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1066 = 0;
- $_etype1069 = 0;
- $xfer += $input->readListBegin($_etype1069, $_size1066);
- for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070)
+ $_size1073 = 0;
+ $_etype1076 = 0;
+ $xfer += $input->readListBegin($_etype1076, $_size1073);
+ for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077)
{
- $elem1071 = null;
- $elem1071 = new \metastore\Partition();
- $xfer += $elem1071->read($input);
- $this->success []= $elem1071;
+ $elem1078 = null;
+ $elem1078 = new \metastore\Partition();
+ $xfer += $elem1078->read($input);
+ $this->success []= $elem1078;
}
$xfer += $input->readListEnd();
} else {
@@ -29430,9 +29428,9 @@ class ThriftHiveMetastore_exchange_partitions_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1072)
+ foreach ($this->success as $iter1079)
{
- $xfer += $iter1072->write($output);
+ $xfer += $iter1079->write($output);
}
}
$output->writeListEnd();
@@ -29578,14 +29576,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1073 = 0;
- $_etype1076 = 0;
- $xfer += $input->readListBegin($_etype1076, $_size1073);
- for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077)
+ $_size1080 = 0;
+ $_etype1083 = 0;
+ $xfer += $input->readListBegin($_etype1083, $_size1080);
+ for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084)
{
- $elem1078 = null;
- $xfer += $input->readString($elem1078);
- $this->part_vals []= $elem1078;
+ $elem1085 = null;
+ $xfer += $input->readString($elem1085);
+ $this->part_vals []= $elem1085;
}
$xfer += $input->readListEnd();
} else {
@@ -29602,14 +29600,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
case 5:
if ($ftype == TType::LST) {
$this->group_names = array();
- $_size1079 = 0;
- $_etype1082 = 0;
- $xfer += $input->readListBegin($_etype1082, $_size1079);
- for ($_i1083 = 0; $_i1083 < $_size1079; ++$_i1083)
+ $_size1086 = 0;
+ $_etype1089 = 0;
+ $xfer += $input->readListBegin($_etype1089, $_size1086);
+ for ($_i1090 = 0; $_i1090 < $_size1086; ++$_i1090)
{
- $elem1084 = null;
- $xfer += $input->readString($elem1084);
- $this->group_names []= $elem1084;
+ $elem1091 = null;
+ $xfer += $input->readString($elem1091);
+ $this->group_names []= $elem1091;
}
$xfer += $input->readListEnd();
} else {
@@ -29647,9 +29645,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1085)
+ foreach ($this->part_vals as $iter1092)
{
- $xfer += $output->writeString($iter1085);
+ $xfer += $output->writeString($iter1092);
}
}
$output->writeListEnd();
@@ -29669,9 +29667,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
{
$output->writeListBegin(TType::STRING, count($this->group_names));
{
- foreach ($this->group_names as $iter1086)
+ foreach ($this->group_names as $iter1093)
{
- $xfer += $output->writeString($iter1086);
+ $xfer += $output->writeString($iter1093);
}
}
$output->writeListEnd();
@@ -30262,15 +30260,15 @@ class ThriftHiveMetastore_get_partitions_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1087 = 0;
- $_etype1090 = 0;
- $xfer += $input->readListBegin($_etype1090, $_size1087);
- for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091)
+ $_size1094 = 0;
+ $_etype1097 = 0;
+ $xfer += $input->readListBegin($_etype1097, $_size1094);
+ for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098)
{
- $elem1092 = null;
- $elem1092 = new \metastore\Partition();
- $xfer += $elem1092->read($input);
- $this->success []= $elem1092;
+ $elem1099 = null;
+ $elem1099 = new \metastore\Partition();
+ $xfer += $elem1099->read($input);
+ $this->success []= $elem1099;
}
$xfer += $input->readListEnd();
} else {
@@ -30314,9 +30312,9 @@ class ThriftHiveMetastore_get_partitions_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1093)
+ foreach ($this->success as $iter1100)
{
- $xfer += $iter1093->write($output);
+ $xfer += $iter1100->write($output);
}
}
$output->writeListEnd();
@@ -30462,14 +30460,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
case 5:
if ($ftype == TType::LST) {
$this->group_names = array();
- $_size1094 = 0;
- $_etype1097 = 0;
- $xfer += $input->readListBegin($_etype1097, $_size1094);
- for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098)
+ $_size1101 = 0;
+ $_etype1104 = 0;
+ $xfer += $input->readListBegin($_etype1104, $_size1101);
+ for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105)
{
- $elem1099 = null;
- $xfer += $input->readString($elem1099);
- $this->group_names []= $elem1099;
+ $elem1106 = null;
+ $xfer += $input->readString($elem1106);
+ $this->group_names []= $elem1106;
}
$xfer += $input->readListEnd();
} else {
@@ -30517,9 +30515,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
{
$output->writeListBegin(TType::STRING, count($this->group_names));
{
- foreach ($this->group_names as $iter1100)
+ foreach ($this->group_names as $iter1107)
{
- $xfer += $output->writeString($iter1100);
+ $xfer += $output->writeString($iter1107);
}
}
$output->writeListEnd();
@@ -30608,15 +30606,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1101 = 0;
- $_etype1104 = 0;
- $xfer += $input->readListBegin($_etype1104, $_size1101);
- for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105)
+ $_size1108 = 0;
+ $_etype1111 = 0;
+ $xfer += $input->readListBegin($_etype1111, $_size1108);
+ for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112)
{
- $elem1106 = null;
- $elem1106 = new \metastore\Partition();
- $xfer += $elem1106->read($input);
- $this->success []= $elem1106;
+ $elem1113 = null;
+ $elem1113 = new \metastore\Partition();
+ $xfer += $elem1113->read($input);
+ $this->success []= $elem1113;
}
$xfer += $input->readListEnd();
} else {
@@ -30660,9 +30658,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1107)
+ foreach ($this->success as $iter1114)
{
- $xfer += $iter1107->write($output);
+ $xfer += $iter1114->write($output);
}
}
$output->writeListEnd();
@@ -30882,15 +30880,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1108 = 0;
- $_etype1111 = 0;
- $xfer += $input->readListBegin($_etype1111, $_size1108);
- for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112)
+ $_size1115 = 0;
+ $_etype1118 = 0;
+ $xfer += $input->readListBegin($_etype1118, $_size1115);
+ for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119)
{
- $elem1113 = null;
- $elem1113 = new \metastore\PartitionSpec();
- $xfer += $elem1113->read($input);
- $this->success []= $elem1113;
+ $elem1120 = null;
+ $elem1120 = new \metastore\PartitionSpec();
+ $xfer += $elem1120->read($input);
+ $this->success []= $elem1120;
}
$xfer += $input->readListEnd();
} else {
@@ -30934,9 +30932,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1114)
+ foreach ($this->success as $iter1121)
{
- $xfer += $iter1114->write($output);
+ $xfer += $iter1121->write($output);
}
}
$output->writeListEnd();
@@ -31155,14 +31153,14 @@ class ThriftHiveMetastore_get_partition_names_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1115 = 0;
- $_etype1118 = 0;
- $xfer += $input->readListBegin($_etype1118, $_size1115);
- for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119)
+ $_size1122 = 0;
+ $_etype1125 = 0;
+ $xfer += $input->readListBegin($_etype1125, $_size1122);
+ for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126)
{
- $elem1120 = null;
- $xfer += $input->readString($elem1120);
- $this->success []= $elem1120;
+ $elem1127 = null;
+ $xfer += $input->readString($elem1127);
+ $this->success []= $elem1127;
}
$xfer += $input->readListEnd();
} else {
@@ -31206,9 +31204,9 @@ class ThriftHiveMetastore_get_partition_names_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter1121)
+ foreach ($this->success as $iter1128)
{
- $xfer += $output->writeString($iter1121);
+ $xfer += $output->writeString($iter1128);
}
}
$output->writeListEnd();
@@ -31539,14 +31537,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1122 = 0;
- $_etype1125 = 0;
- $xfer += $input->readListBegin($_etype1125, $_size1122);
- for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126)
+ $_size1129 = 0;
+ $_etype1132 = 0;
+ $xfer += $input->readListBegin($_etype1132, $_size1129);
+ for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133)
{
- $elem1127 = null;
- $xfer += $input->readString($elem1127);
- $this->part_vals []= $elem1127;
+ $elem1134 = null;
+ $xfer += $input->readString($elem1134);
+ $this->part_vals []= $elem1134;
}
$xfer += $input->readListEnd();
} else {
@@ -31591,9 +31589,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1128)
+ foreach ($this->part_vals as $iter1135)
{
- $xfer += $output->writeString($iter1128);
+ $xfer += $output->writeString($iter1135);
}
}
$output->writeListEnd();
@@ -31687,15 +31685,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1129 = 0;
- $_etype1132 = 0;
- $xfer += $input->readListBegin($_etype1132, $_size1129);
- for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133)
+ $_size1136 = 0;
+ $_etype1139 = 0;
+ $xfer += $input->readListBegin($_etype1139, $_size1136);
+ for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140)
{
- $elem1134 = null;
- $elem1134 = new \metastore\Partition();
- $xfer += $elem1134->read($input);
- $this->success []= $elem1134;
+ $elem1141 = null;
+ $elem1141 = new \metastore\Partition();
+ $xfer += $elem1141->read($input);
+ $this->success []= $elem1141;
}
$xfer += $input->readListEnd();
} else {
@@ -31739,9 +31737,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1135)
+ foreach ($this->success as $iter1142)
{
- $xfer += $iter1135->write($output);
+ $xfer += $iter1142->write($output);
}
}
$output->writeListEnd();
@@ -31888,14 +31886,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1136 = 0;
- $_etype1139 = 0;
- $xfer += $input->readListBegin($_etype1139, $_size1136);
- for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140)
+ $_size1143 = 0;
+ $_etype1146 = 0;
+ $xfer += $input->readListBegin($_etype1146, $_size1143);
+ for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147)
{
- $elem1141 = null;
- $xfer += $input->readString($elem1141);
- $this->part_vals []= $elem1141;
+ $elem1148 = null;
+ $xfer += $input->readString($elem1148);
+ $this->part_vals []= $elem1148;
}
$xfer += $input->readListEnd();
} else {
@@ -31919,14 +31917,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
case 6:
if ($ftype == TType::LST) {
$this->group_names = array();
- $_size1142 = 0;
- $_etype1145 = 0;
- $xfer += $input->readListBegin($_etype1145, $_size1142);
- for ($_i1146 = 0; $_i1146 < $_size1142; ++$_i1146)
+ $_size1149 = 0;
+ $_etype1152 = 0;
+ $xfer += $input->readListBegin($_etype1152, $_size1149);
+ for ($_i1153 = 0; $_i1153 < $_size1149; ++$_i1153)
{
- $elem1147 = null;
- $xfer += $input->readString($elem1147);
- $this->group_names []= $elem1147;
+ $elem1154 = null;
+ $xfer += $input->readString($elem1154);
+ $this->group_names []= $elem1154;
}
$xfer += $input->readListEnd();
} else {
@@ -31964,9 +31962,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1148)
+ foreach ($this->part_vals as $iter1155)
{
- $xfer += $output->writeString($iter1148);
+ $xfer += $output->writeString($iter1155);
}
}
$output->writeListEnd();
@@ -31991,9 +31989,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
{
$output->writeListBegin(TType::STRING, count($this->group_names));
{
- foreach ($this->group_names as $iter1149)
+ foreach ($this->group_names as $iter1156)
{
- $xfer += $output->writeString($iter1149);
+ $xfer += $output->writeString($iter1156);
}
}
$output->writeListEnd();
@@ -32082,15 +32080,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1150 = 0;
- $_etype1153 = 0;
- $xfer += $input->readListBegin($_etype1153, $_size1150);
- for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154)
+ $_size1157 = 0;
+ $_etype1160 = 0;
+ $xfer += $input->readListBegin($_etype1160, $_size1157);
+ for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161)
{
- $elem1155 = null;
- $elem1155 = new \metastore\Partition();
- $xfer += $elem1155->read($input);
- $this->success []= $elem1155;
+ $elem1162 = null;
+ $elem1162 = new \metastore\Partition();
+ $xfer += $elem1162->read($input);
+ $this->success []= $elem1162;
}
$xfer += $input->readListEnd();
} else {
@@ -32134,9 +32132,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1156)
+ foreach ($this->success as $iter1163)
{
- $xfer += $iter1156->write($output);
+ $xfer += $iter1163->write($output);
}
}
$output->writeListEnd();
@@ -32257,14 +32255,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
case 3:
if ($ftype == TType::LST) {
$this->part_vals = array();
- $_size1157 = 0;
- $_etype1160 = 0;
- $xfer += $input->readListBegin($_etype1160, $_size1157);
- for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161)
+ $_size1164 = 0;
+ $_etype1167 = 0;
+ $xfer += $input->readListBegin($_etype1167, $_size1164);
+ for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168)
{
- $elem1162 = null;
- $xfer += $input->readString($elem1162);
- $this->part_vals []= $elem1162;
+ $elem1169 = null;
+ $xfer += $input->readString($elem1169);
+ $this->part_vals []= $elem1169;
}
$xfer += $input->readListEnd();
} else {
@@ -32309,9 +32307,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
{
$output->writeListBegin(TType::STRING, count($this->part_vals));
{
- foreach ($this->part_vals as $iter1163)
+ foreach ($this->part_vals as $iter1170)
{
- $xfer += $output->writeString($iter1163);
+ $xfer += $output->writeString($iter1170);
}
}
$output->writeListEnd();
@@ -32404,14 +32402,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1164 = 0;
- $_etype1167 = 0;
- $xfer += $input->readListBegin($_etype1167, $_size1164);
- for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168)
+ $_size1171 = 0;
+ $_etype1174 = 0;
+ $xfer += $input->readListBegin($_etype1174, $_size1171);
+ for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175)
{
- $elem1169 = null;
- $xfer += $input->readString($elem1169);
- $this->success []= $elem1169;
+ $elem1176 = null;
+ $xfer += $input->readString($elem1176);
+ $this->success []= $elem1176;
}
$xfer += $input->readListEnd();
} else {
@@ -32455,9 +32453,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
{
$output->writeListBegin(TType::STRING, count($this->success));
{
- foreach ($this->success as $iter1170)
+ foreach ($this->success as $iter1177)
{
- $xfer += $output->writeString($iter1170);
+ $xfer += $output->writeString($iter1177);
}
}
$output->writeListEnd();
@@ -32700,15 +32698,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1171 = 0;
- $_etype1174 = 0;
- $xfer += $input->readListBegin($_etype1174, $_size1171);
- for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175)
+ $_size1178 = 0;
+ $_etype1181 = 0;
+ $xfer += $input->readListBegin($_etype1181, $_size1178);
+ for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182)
{
- $elem1176 = null;
- $elem1176 = new \metastore\Partition();
- $xfer += $elem1176->read($input);
- $this->success []= $elem1176;
+ $elem1183 = null;
+ $elem1183 = new \metastore\Partition();
+ $xfer += $elem1183->read($input);
+ $this->success []= $elem1183;
}
$xfer += $input->readListEnd();
} else {
@@ -32752,9 +32750,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1177)
+ foreach ($this->success as $iter1184)
{
- $xfer += $iter1177->write($output);
+ $xfer += $iter1184->write($output);
}
}
$output->writeListEnd();
@@ -32997,15 +32995,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1178 = 0;
- $_etype1181 = 0;
- $xfer += $input->readListBegin($_etype1181, $_size1178);
- for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182)
+ $_size1185 = 0;
+ $_etype1188 = 0;
+ $xfer += $input->readListBegin($_etype1188, $_size1185);
+ for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189)
{
- $elem1183 = null;
- $elem1183 = new \metastore\PartitionSpec();
- $xfer += $elem1183->read($input);
- $this->success []= $elem1183;
+ $elem1190 = null;
+ $elem1190 = new \metastore\PartitionSpec();
+ $xfer += $elem1190->read($input);
+ $this->success []= $elem1190;
}
$xfer += $input->readListEnd();
} else {
@@ -33049,9 +33047,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1184)
+ foreach ($this->success as $iter1191)
{
- $xfer += $iter1184->write($output);
+ $xfer += $iter1191->write($output);
}
}
$output->writeListEnd();
@@ -33617,14 +33615,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
case 3:
if ($ftype == TType::LST) {
$this->names = array();
- $_size1185 = 0;
- $_etype1188 = 0;
- $xfer += $input->readListBegin($_etype1188, $_size1185);
- for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189)
+ $_size1192 = 0;
+ $_etype1195 = 0;
+ $xfer += $input->readListBegin($_etype1195, $_size1192);
+ for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196)
{
- $elem1190 = null;
- $xfer += $input->readString($elem1190);
- $this->names []= $elem1190;
+ $elem1197 = null;
+ $xfer += $input->readString($elem1197);
+ $this->names []= $elem1197;
}
$xfer += $input->readListEnd();
} else {
@@ -33662,9 +33660,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
{
$output->writeListBegin(TType::STRING, count($this->names));
{
- foreach ($this->names as $iter1191)
+ foreach ($this->names as $iter1198)
{
- $xfer += $output->writeString($iter1191);
+ $xfer += $output->writeString($iter1198);
}
}
$output->writeListEnd();
@@ -33753,15 +33751,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
case 0:
if ($ftype == TType::LST) {
$this->success = array();
- $_size1192 = 0;
- $_etype1195 = 0;
- $xfer += $input->readListBegin($_etype1195, $_size1192);
- for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196)
+ $_size1199 = 0;
+ $_etype1202 = 0;
+ $xfer += $input->readListBegin($_etype1202, $_size1199);
+ for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203)
{
- $elem1197 = null;
- $elem1197 = new \metastore\Partition();
- $xfer += $elem1197->read($input);
- $this->success []= $elem1197;
+ $elem1204 = null;
+ $elem1204 = new \metastore\Partition();
+ $xfer += $elem1204->read($input);
+ $this->success []= $elem1204;
}
$xfer += $input->readListEnd();
} else {
@@ -33805,9 +33803,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
{
$output->writeListBegin(TType::STRUCT, count($this->success));
{
- foreach ($this->success as $iter1198)
+ foreach ($this->success as $iter1205)
{
- $xfer += $iter1198->write($output);
+ $xfer += $iter1205->write($output);
}
}
$output->writeListEnd();
@@ -34146,15 +34144,15 @@ class ThriftHiveMetastore_alter_partitions_args {
case 3:
if ($ftype == TType::LST) {
$this->new_parts = array();
- $_size1199 = 0;
- $_etype1202 = 0;
- $xfer += $input->readListBegin($_etype1202, $_size1199);
- for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203)
+ $_size1206 = 0;
+ $_etype1209 = 0;
+ $xfer += $input->readListBegin($_etype1209, $_size1206);
+ for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210)
{
- $elem1204 = null;
- $elem1204 = new \metastore\Partition();
- $xfer += $elem1204->read($input);
- $this->new_parts []= $elem1204;
+ $elem1211 = null;
+ $elem1211 = new \metastore\Partition();
+ $xfer += $elem1211->read($input);
+ $this->new_parts []= $elem1211;
}
$xfer += $input->readListEnd();
} else {
@@ -34192,9 +34190,9 @@ class ThriftHiveMetastore_alter_partitions_args {
{
$output->writeListBegin(TType::STRUCT, count($this->new_parts));
{
- foreach ($this->new_parts as $iter1205)
+ foreach ($this->new_parts as $iter1212)
{
- $xfer += $iter1205->write($output);
+ $xfer += $iter1212->write($output);
}
}
$output->writeListEnd();
@@ -34314,61 +34312,23 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
static $_TSPEC;
/**
- * @var string
- */
- public $db_name = null;
- /**
- * @var string
- */
- public $tbl_name = null;
- /**
- * @var \metastore\Partition[]
- */
- public $new_parts = null;
- /**
- * @var \metastore\EnvironmentContext
+ * @var \metastore\AlterPartitionsRequest
*/
- public $environment_context = null;
+ public $req = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
self::$_TSPEC = array(
1 => array(
- 'var' => 'db_name',
- 'type' => TType::STRING,
- ),
- 2 => array(
- 'var' => 'tbl_name',
- 'type' => TType::STRING,
- ),
- 3 => array(
- 'var' => 'new_parts',
- 'type' => TType::LST,
- 'etype' => TType::STRUCT,
- 'elem' => array(
- 'type' => TType::STRUCT,
- 'class' => '\metastore\Partition',
- ),
- ),
- 4 => array(
- 'var' => 'environment_context',
+ 'var' => 'req',
'type' => TType::STRUCT,
- 'class' => '\metastore\EnvironmentContext',
+ 'class' => '\metastore\AlterPartitionsRequest',
),
);
}
if (is_array($vals)) {
- if (isset($vals['db_name'])) {
- $this->db_name = $vals['db_name'];
- }
- if (isset($vals['tbl_name'])) {
- $this->tbl_name = $vals['tbl_name'];
- }
- if (isset($vals['new_parts'])) {
- $this->new_parts = $vals['new_parts'];
- }
- if (isset($vals['environment_context'])) {
- $this->environment_context = $vals['environment_context'];
+ if (isset($vals['req'])) {
+ $this->req = $vals['req'];
}
}
}
@@ -34393,41 +34353,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
switch ($fid)
{
case 1:
- if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->db_name);
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
- case 2:
- if ($ftype == TType::STRING) {
- $xfer += $input->readString($this->tbl_name);
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
- case 3:
- if ($ftype == TType::LST) {
- $this->new_parts = array();
- $_size1206 = 0;
- $_etype1209 = 0;
- $xfer += $input->readListBegin($_etype1209, $_size1206);
- for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210)
- {
- $elem1211 = null;
- $elem1211 = new \metastore\Partition();
- $xfer += $elem1211->read($input);
- $this->new_parts []= $elem1211;
- }
- $xfer += $input->readListEnd();
- } else {
- $xfer += $input->skip($ftype);
- }
- break;
- case 4:
if ($ftype == TType::STRUCT) {
- $this->environment_context = new \metastore\EnvironmentContext();
- $xfer += $this->environment_context->read($input);
+ $this->req = new \metastore\AlterPartitionsRequest();
+ $xfer += $this->req->read($input);
} else {
$xfer += $input->skip($ftype);
}
@@ -34445,39 +34373,12 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
public function write($output) {
$xfer = 0;
$xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_args');
- if ($this->db_name !== null) {
- $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
- $xfer += $output->writeString($this->db_name);
- $xfer += $output->writeFieldEnd();
- }
- if ($this->tbl_name !== null) {
- $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
- $xfer += $output->writeString($this->tbl_name);
- $xfer += $output->writeFieldEnd();
- }
- if ($this->new_parts !== null) {
- if (!is_array($this->new_parts)) {
- throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
- }
- $xfer += $output->writeFieldBegin('new_parts', TType::LST, 3);
- {
- $output->writeListBegin(TType::STRUCT, count($this->new_parts));
- {
- foreach ($this->new_parts as $iter1212)
- {
- $xfer += $iter1212->write($output);
- }
- }
- $output->writeListEnd();
- }
- $xfer += $output->writeFieldEnd();
- }
- if ($this->environment_context !== null) {
- if (!is_object($this->environment_context)) {
+ if ($this->req !== null) {
+ if (!is_object($this->req)) {
throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
}
- $xfer += $output->writeFieldBegin('environment_context', TType::STRUCT, 4);
- $xfer += $this->environment_context->write($output);
+ $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1);
+ $xfer += $this->req->write($output);
$xfer += $output->writeFieldEnd();
}
$xfer += $output->writeFieldStop();
@@ -34491,6 +34392,10 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
static $_TSPEC;
/**
+ * @var \metastore\AlterPartitionsResponse
+ */
+ public $success = null;
+ /**
* @var \metastore\InvalidOperationException
*/
public $o1 = null;
@@ -34502,6 +34407,11 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
self::$_TSPEC = array(
+ 0 => array(
+ 'var' => 'success',
+ 'type' => TType::STRUCT,
+ 'class' => '\metastore\AlterPartitionsResponse',
+ ),
1 => array(
'var' => 'o1',
'type' => TType::STRUCT,
@@ -34515,6 +34425,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
);
}
if (is_array($vals)) {
+ if (isset($vals['success'])) {
+ $this->success = $vals['success'];
+ }
if (isset($vals['o1'])) {
$this->o1 = $vals['o1'];
}
@@ -34543,6 +34456,14 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
}
switch ($fid)
{
+ case 0:
+ if ($ftype == TType::STRUCT) {
+ $this->success = new \metastore\AlterPartitionsResponse();
+ $xfer += $this->success->read($input);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
case 1:
if ($ftype == TType::STRUCT) {
$this->o1 = new \metastore\InvalidOperationException();
@@ -34572,6 +34493,14 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_result {
public function write($output) {
$xfer = 0;
$xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_partitions_with_environment_context_result');
+ if ($this->success !== null) {
+ if (!is_object($this->success)) {
+ throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+ }
+ $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+ $xfer += $this->success->write($output);
+
<TRUNCATED>
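A minimal sketch of the new call shape in the generated PHP bindings, following the struct change above: the client method now wraps its arguments in a single AlterPartitionsRequest (field 1 of the args struct) and returns an AlterPartitionsResponse (field 0 of the result) instead of taking db name, table name, partition list and environment context positionally. The request field names used below (dbName, tableName, partitions, environmentContext) are assumptions not visible in this truncated diff; the generated hive_metastore_types.php has the authoritative spelling.

<?php
// Hypothetical usage sketch, not part of this commit.
// $newParts is assumed to be an array of \metastore\Partition objects and
// $envCtx a \metastore\EnvironmentContext (or null), as in the old signature.
$req = new \metastore\AlterPartitionsRequest(array(
  'dbName'             => 'default',
  'tableName'          => 'text_llap',
  'partitions'         => $newParts,
  'environmentContext' => $envCtx,
));

// The generated args struct now carries only this request object
// ('req', TType::STRUCT, field id 1), so the client call reduces to a
// single parameter and yields the new response struct:
$resp = $client->alter_partitions_with_environment_context($req);
// $resp is a \metastore\AlterPartitionsResponse (result field 0 in the diff above).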
[31/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_text.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_text.q.out b/ql/src/test/results/clientpositive/llap/llap_text.q.out
new file mode 100644
index 0000000..40d08d3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/llap_text.q.out
@@ -0,0 +1,1082 @@
+PREHOOK: query: DROP TABLE text_llap
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE text_llap
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE text_llap(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN)
+row format serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+stored as inputformat "org.apache.hadoop.mapred.TextInputFormat"
+
+ outputformat "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@text_llap
+POSTHOOK: query: CREATE TABLE text_llap(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN)
+row format serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+stored as inputformat "org.apache.hadoop.mapred.TextInputFormat"
+
+ outputformat "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@text_llap
+PREHOOK: query: insert into table text_llap
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@text_llap
+POSTHOOK: query: insert into table text_llap
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@text_llap
+POSTHOOK: Lineage: text_llap.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: text_llap.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: text_llap.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: text_llap.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: text_llap.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: text_llap.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: create table text_llap2(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ `dec` decimal,
+ bin binary)
+row format delimited fields terminated by '|'
+stored as inputformat "org.apache.hadoop.mapred.TextInputFormat"
+
+outputformat "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@text_llap2
+POSTHOOK: query: create table text_llap2(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ `dec` decimal,
+ bin binary)
+row format delimited fields terminated by '|'
+stored as inputformat "org.apache.hadoop.mapred.TextInputFormat"
+
+outputformat "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@text_llap2
+PREHOOK: query: load data local inpath '../../data/files/over10k.gz' into table text_llap2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@text_llap2
+POSTHOOK: query: load data local inpath '../../data/files/over10k.gz' into table text_llap2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@text_llap2
+PREHOOK: query: create table text_llap1 like text_llap
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@text_llap1
+POSTHOOK: query: create table text_llap1 like text_llap
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@text_llap1
+PREHOOK: query: create table text_llap100 like text_llap
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@text_llap100
+POSTHOOK: query: create table text_llap100 like text_llap
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@text_llap100
+PREHOOK: query: create table text_llap1000 like text_llap
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@text_llap1000
+POSTHOOK: query: create table text_llap1000 like text_llap
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@text_llap1000
+PREHOOK: query: insert into table text_llap1
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@text_llap1
+POSTHOOK: query: insert into table text_llap1
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@text_llap1
+POSTHOOK: Lineage: text_llap1.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: text_llap1.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap1.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap1.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: text_llap1.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: text_llap1.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: text_llap1.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: text_llap1.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap1.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap1.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap1.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap1.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: insert into table text_llap100
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@text_llap100
+POSTHOOK: query: insert into table text_llap100
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@text_llap100
+POSTHOOK: Lineage: text_llap100.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: text_llap100.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap100.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap100.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: text_llap100.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: text_llap100.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: text_llap100.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: text_llap100.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap100.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap100.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap100.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap100.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: insert into table text_llap1000
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null limit 1000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@text_llap1000
+POSTHOOK: query: insert into table text_llap1000
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 from alltypesorc
+where cboolean2 is not null or cstring1 is not null or ctinyint is not null limit 1000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@text_llap1000
+POSTHOOK: Lineage: text_llap1000.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: text_llap1000.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select t, s, ts from text_llap2 order by t, s, ts limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+POSTHOOK: query: select t, s, ts from text_llap2 order by t, s, ts limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+-2 alice carson 2013-03-01 09:11:58.703074
+-2 alice nixon 2013-03-01 09:11:58.703321
+-2 alice underhill 2013-03-01 09:11:58.703122
+-2 alice underhill 2013-03-01 09:11:58.703127
+-2 alice xylophone 2013-03-01 09:11:58.703105
+-2 bob falkner 2013-03-01 09:11:58.703071
+-2 bob king 2013-03-01 09:11:58.703236
+-2 bob ovid 2013-03-01 09:11:58.703285
+-2 bob van buren 2013-03-01 09:11:58.703218
+-2 bob xylophone 2013-03-01 09:11:58.703219
+-2 calvin xylophone 2013-03-01 09:11:58.703083
+-2 david falkner 2013-03-01 09:11:58.703254
+-2 david laertes 2013-03-01 09:11:58.703076
+-2 david miller 2013-03-01 09:11:58.703238
+-3 alice allen 2013-03-01 09:11:58.703323
+-3 alice davidson 2013-03-01 09:11:58.703226
+-3 alice falkner 2013-03-01 09:11:58.703304
+-3 alice king 2013-03-01 09:11:58.70314
+-3 alice king 2013-03-01 09:11:58.703247
+-3 alice xylophone 2013-03-01 09:11:58.703129
+-3 bob ellison 2013-03-01 09:11:58.703261
+-3 bob falkner 2013-03-01 09:11:58.70328
+-3 bob ichabod 2013-03-01 09:11:58.70324
+-3 bob johnson 2013-03-01 09:11:58.703204
+-3 bob polk 2013-03-01 09:11:58.703128
+-3 bob underhill 2013-03-01 09:11:58.703176
+-3 bob underhill 2013-03-01 09:11:58.703188
+-3 bob van buren 2013-03-01 09:11:58.703199
+-3 calvin ichabod 2013-03-01 09:11:58.703213
+-3 calvin white 2013-03-01 09:11:58.703295
+-3 david carson 2013-03-01 09:11:58.703136
+-3 david falkner 2013-03-01 09:11:58.703305
+-3 david garcia 2013-03-01 09:11:58.70319
+-3 david hernandez 2013-03-01 09:11:58.703252
+-3 ethan steinbeck 2013-03-01 09:11:58.703079
+-3 ethan underhill 2013-03-01 09:11:58.703138
+-3 fred ellison 2013-03-01 09:11:58.703233
+-3 gabriella brown 2013-03-01 09:11:58.703288
+-3 holly nixon 2013-03-01 09:11:58.703262
+-3 holly polk 2013-03-01 09:11:58.703273
+-3 holly steinbeck 2013-03-01 09:11:58.703242
+-3 holly thompson 2013-03-01 09:11:58.703073
+-3 holly underhill 2013-03-01 09:11:58.703219
+-3 irene ellison 2013-03-01 09:11:58.703092
+-3 irene underhill 2013-03-01 09:11:58.703298
+-3 irene young 2013-03-01 09:11:58.703084
+-3 jessica johnson 2013-03-01 09:11:58.703319
+-3 jessica king 2013-03-01 09:11:58.703279
+-3 jessica miller 2013-03-01 09:11:58.703245
+-3 jessica white 2013-03-01 09:11:58.703199
+-3 katie ichabod 2013-03-01 09:11:58.703139
+-3 luke garcia 2013-03-01 09:11:58.703076
+-3 luke ichabod 2013-03-01 09:11:58.703294
+-3 luke king 2013-03-01 09:11:58.703207
+-3 luke young 2013-03-01 09:11:58.703182
+-3 mike allen 2013-03-01 09:11:58.703292
+-3 mike king 2013-03-01 09:11:58.703214
+-3 mike polk 2013-03-01 09:11:58.70319
+-3 mike white 2013-03-01 09:11:58.703087
+-3 mike xylophone 2013-03-01 09:11:58.703308
+-3 nick nixon 2013-03-01 09:11:58.703083
+-3 nick robinson 2013-03-01 09:11:58.703147
+-3 oscar davidson 2013-03-01 09:11:58.703071
+-3 oscar garcia 2013-03-01 09:11:58.703282
+-3 oscar johnson 2013-03-01 09:11:58.70311
+-3 oscar johnson 2013-03-01 09:11:58.703133
+-3 oscar miller 2013-03-01 09:11:58.70332
+-3 priscilla laertes 2013-03-01 09:11:58.70325
+-3 priscilla quirinius 2013-03-01 09:11:58.703228
+-3 priscilla zipper 2013-03-01 09:11:58.703321
+-3 quinn ellison 2013-03-01 09:11:58.703232
+-3 quinn polk 2013-03-01 09:11:58.703244
+-3 rachel davidson 2013-03-01 09:11:58.703316
+-3 rachel thompson 2013-03-01 09:11:58.703276
+-3 sarah miller 2013-03-01 09:11:58.70316
+-3 sarah robinson 2013-03-01 09:11:58.703288
+-3 sarah xylophone 2013-03-01 09:11:58.703112
+-3 sarah zipper 2013-03-01 09:11:58.703289
+-3 tom hernandez 2013-03-01 09:11:58.703108
+-3 tom hernandez 2013-03-01 09:11:58.703188
+-3 tom polk 2013-03-01 09:11:58.703217
+-3 tom steinbeck 2013-03-01 09:11:58.703251
+-3 ulysses carson 2013-03-01 09:11:58.703253
+-3 ulysses ellison 2013-03-01 09:11:58.703197
+-3 ulysses quirinius 2013-03-01 09:11:58.703189
+-3 ulysses robinson 2013-03-01 09:11:58.703227
+-3 ulysses steinbeck 2013-03-01 09:11:58.703259
+-3 victor allen 2013-03-01 09:11:58.703155
+-3 victor hernandez 2013-03-01 09:11:58.703176
+-3 victor robinson 2013-03-01 09:11:58.703305
+-3 victor thompson 2013-03-01 09:11:58.703299
+-3 victor xylophone 2013-03-01 09:11:58.703135
+-3 wendy quirinius 2013-03-01 09:11:58.703266
+-3 wendy robinson 2013-03-01 09:11:58.703294
+-3 wendy xylophone 2013-03-01 09:11:58.703191
+-3 xavier garcia 2013-03-01 09:11:58.703194
+-3 xavier ovid 2013-03-01 09:11:58.703148
+-3 yuri xylophone 2013-03-01 09:11:58.703258
+-3 zach thompson 2013-03-01 09:11:58.703252
+-3 zach young 2013-03-01 09:11:58.703191
+PREHOOK: query: select * from text_llap2 order by t, s, ts limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from text_llap2 order by t, s, ts limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+-2 305 65767 4294967529 76.54 4.72 true calvin xylophone 2013-03-01 09:11:58.703083 69 quiet hour
+-2 331 65707 4294967335 67.12 13.51 false bob ovid 2013-03-01 09:11:58.703285 62 joggying
+-2 373 65548 4294967423 16.98 43.6 true alice nixon 2013-03-01 09:11:58.703321 53 debate
+-2 378 65553 4294967461 9.81 10.36 true bob king 2013-03-01 09:11:58.703236 91 opthamology
+-2 389 65706 4294967488 26.68 17.93 false alice underhill 2013-03-01 09:11:58.703122 87 forestry
+-2 389 65738 4294967520 99.45 26.26 true bob falkner 2013-03-01 09:11:58.703071 17 nap time
+-2 393 65715 4294967305 48.3 1.85 true alice xylophone 2013-03-01 09:11:58.703105 30 values clariffication
+-2 406 65582 4294967311 20.94 35.74 false bob van buren 2013-03-01 09:11:58.703218 25 opthamology
+-2 406 65762 4294967443 1.79 33.42 false david falkner 2013-03-01 09:11:58.703254 58 opthamology
+-2 407 65612 4294967318 25.48 41.56 true david laertes 2013-03-01 09:11:58.703076 40 forestry
+-2 427 65666 4294967465 19.69 33.24 true bob xylophone 2013-03-01 09:11:58.703219 33 joggying
+-2 446 65790 4294967302 6.49 10.81 false alice underhill 2013-03-01 09:11:58.703127 44 undecided
+-2 450 65727 4294967487 94.57 30.4 false david miller 2013-03-01 09:11:58.703238 40 religion
+-2 473 65565 4294967320 87.78 12.26 true alice carson 2013-03-01 09:11:58.703074 90 xylophone band
+-3 260 65595 4294967545 59.07 6.75 false bob falkner 2013-03-01 09:11:58.70328 37 chemistry
+-3 264 65776 4294967398 20.95 5.97 false bob polk 2013-03-01 09:11:58.703128 93 joggying
+-3 266 65736 4294967397 19.94 10.01 false quinn ellison 2013-03-01 09:11:58.703232 89 forestry
+-3 268 65710 4294967448 82.74 12.48 true holly polk 2013-03-01 09:11:58.703273 15 undecided
+-3 270 65702 4294967512 38.05 1.07 true david carson 2013-03-01 09:11:58.703136 28 philosophy
+-3 275 65543 4294967522 74.92 17.29 false mike king 2013-03-01 09:11:58.703214 53 opthamology
+-3 275 65575 4294967441 38.22 2.43 true sarah xylophone 2013-03-01 09:11:58.703112 93 wind surfing
+-3 275 65622 4294967302 71.78 8.49 false wendy robinson 2013-03-01 09:11:58.703294 95 undecided
+-3 279 65661 4294967536 25.5 0.02 false wendy quirinius 2013-03-01 09:11:58.703266 75 undecided
+-3 280 65548 4294967350 52.3 33.06 true calvin white 2013-03-01 09:11:58.703295 30 quiet hour
+-3 280 65597 4294967377 18.44 49.8 true alice falkner 2013-03-01 09:11:58.703304 74 zync studies
+-3 280 65769 4294967324 28.78 35.05 true xavier ovid 2013-03-01 09:11:58.703148 43 kindergarten
+-3 284 65566 4294967400 62.81 39.1 false jessica white 2013-03-01 09:11:58.703199 70 opthamology
+-3 286 65573 4294967493 18.27 23.71 false zach young 2013-03-01 09:11:58.703191 22 kindergarten
+-3 289 65757 4294967528 56.2 44.24 true luke ichabod 2013-03-01 09:11:58.703294 7 yard duty
+-3 298 65720 4294967305 34.6 39.7 false ethan steinbeck 2013-03-01 09:11:58.703079 35 kindergarten
+-3 299 65763 4294967542 85.96 10.45 true jessica miller 2013-03-01 09:11:58.703245 26 mathematics
+-3 303 65617 4294967473 10.26 1.41 false ulysses quirinius 2013-03-01 09:11:58.703189 84 chemistry
+-3 307 65634 4294967546 90.3 28.44 false irene underhill 2013-03-01 09:11:58.703298 85 forestry
+-3 311 65569 4294967460 3.82 35.45 false luke garcia 2013-03-01 09:11:58.703076 93 chemistry
+-3 313 65540 4294967316 25.67 39.88 false ulysses robinson 2013-03-01 09:11:58.703227 61 religion
+-3 314 65670 4294967330 13.67 34.86 false wendy xylophone 2013-03-01 09:11:58.703191 85 mathematics
+-3 315 65671 4294967412 94.22 25.96 true oscar johnson 2013-03-01 09:11:58.703133 89 nap time
+-3 316 65696 4294967445 22.0 43.41 false priscilla laertes 2013-03-01 09:11:58.70325 51 values clariffication
+-3 318 65553 4294967452 9.86 32.77 false holly underhill 2013-03-01 09:11:58.703219 47 wind surfing
+-3 320 65644 4294967434 84.39 48.0 false sarah robinson 2013-03-01 09:11:58.703288 72 wind surfing
+-3 324 65773 4294967296 11.07 25.95 true oscar miller 2013-03-01 09:11:58.70332 57 opthamology
+-3 333 65562 4294967359 22.34 35.58 false ulysses steinbeck 2013-03-01 09:11:58.703259 87 xylophone band
+-3 335 65696 4294967333 72.26 9.66 true nick nixon 2013-03-01 09:11:58.703083 85 philosophy
+-3 337 65629 4294967521 55.59 6.54 true luke king 2013-03-01 09:11:58.703207 59 industrial engineering
+-3 337 65658 4294967361 43.4 12.05 false victor allen 2013-03-01 09:11:58.703155 45 topology
+-3 339 65671 4294967311 8.37 15.98 true bob ellison 2013-03-01 09:11:58.703261 14 linguistics
+-3 339 65737 4294967453 14.23 26.66 true ethan underhill 2013-03-01 09:11:58.703138 95 xylophone band
+-3 343 65783 4294967378 7.1 18.16 true ulysses carson 2013-03-01 09:11:58.703253 97 mathematics
+-3 344 65733 4294967363 0.56 11.96 true rachel thompson 2013-03-01 09:11:58.703276 88 wind surfing
+-3 344 65756 4294967378 52.13 18.95 true victor thompson 2013-03-01 09:11:58.703299 81 topology
+-3 346 65752 4294967298 56.05 34.03 false tom polk 2013-03-01 09:11:58.703217 49 zync studies
+-3 350 65566 4294967434 23.22 6.68 true nick robinson 2013-03-01 09:11:58.703147 24 education
+-3 362 65712 4294967325 43.73 48.74 false oscar garcia 2013-03-01 09:11:58.703282 30 chemistry
+-3 374 65731 4294967388 22.35 22.71 true bob johnson 2013-03-01 09:11:58.703204 80 biology
+-3 376 65548 4294967431 96.78 43.23 false fred ellison 2013-03-01 09:11:58.703233 75 education
+-3 376 65766 4294967326 97.88 5.58 true sarah zipper 2013-03-01 09:11:58.703289 49 study skills
+-3 381 65640 4294967379 59.34 7.97 false ulysses ellison 2013-03-01 09:11:58.703197 32 undecided
+-3 384 65613 4294967470 63.49 45.85 false holly steinbeck 2013-03-01 09:11:58.703242 54 chemistry
+-3 384 65676 4294967453 71.97 31.52 false alice davidson 2013-03-01 09:11:58.703226 14 xylophone band
+-3 386 65611 4294967331 58.81 22.43 true sarah miller 2013-03-01 09:11:58.70316 75 mathematics
+-3 386 65716 4294967496 12.12 2.37 false zach thompson 2013-03-01 09:11:58.703252 16 linguistics
+-3 387 65550 4294967355 84.75 22.75 true holly thompson 2013-03-01 09:11:58.703073 52 biology
+-3 400 65557 4294967503 76.31 29.44 false alice allen 2013-03-01 09:11:58.703323 19 debate
+-3 408 65667 4294967509 81.68 45.9 true david hernandez 2013-03-01 09:11:58.703252 52 topology
+-3 414 65608 4294967338 81.39 49.09 true tom steinbeck 2013-03-01 09:11:58.703251 11 xylophone band
+-3 415 65571 4294967536 61.81 24.24 true victor robinson 2013-03-01 09:11:58.703305 23 american history
+-3 423 65646 4294967378 63.19 34.04 false priscilla quirinius 2013-03-01 09:11:58.703228 35 xylophone band
+-3 430 65667 4294967469 65.5 40.46 true yuri xylophone 2013-03-01 09:11:58.703258 31 american history
+-3 431 65635 4294967500 29.06 0.34 false calvin ichabod 2013-03-01 09:11:58.703213 29 undecided
+-3 432 65646 4294967492 0.83 27.18 true oscar davidson 2013-03-01 09:11:58.703071 56 linguistics
+-3 433 65654 4294967455 6.83 5.33 false bob van buren 2013-03-01 09:11:58.703199 29 yard duty
+-3 438 65618 4294967398 62.39 4.62 false victor xylophone 2013-03-01 09:11:58.703135 88 values clariffication
+-3 447 65755 4294967320 43.69 20.03 false victor hernandez 2013-03-01 09:11:58.703176 14 forestry
+-3 448 65610 4294967314 81.97 31.11 true mike xylophone 2013-03-01 09:11:58.703308 79 opthamology
+-3 451 65696 4294967532 6.8 40.07 false luke young 2013-03-01 09:11:58.703182 27 biology
+-3 454 65627 4294967481 17.6 35.72 false bob underhill 2013-03-01 09:11:58.703188 67 religion
+-3 454 65705 4294967468 62.12 14.32 true mike white 2013-03-01 09:11:58.703087 40 joggying
+-3 454 65733 4294967544 73.83 18.42 false bob ichabod 2013-03-01 09:11:58.70324 96 debate
+-3 455 65570 4294967304 2.48 30.76 false alice king 2013-03-01 09:11:58.70314 42 forestry
+-3 458 65563 4294967315 62.77 41.5 false alice king 2013-03-01 09:11:58.703247 3 mathematics
+-3 458 65679 4294967331 64.29 43.8 true irene young 2013-03-01 09:11:58.703084 3 american history
+-3 458 65696 4294967418 45.24 8.49 false irene ellison 2013-03-01 09:11:58.703092 54 american history
+-3 459 65644 4294967456 92.71 0.08 false jessica king 2013-03-01 09:11:58.703279 53 joggying
+-3 465 65551 4294967457 83.39 46.64 true mike allen 2013-03-01 09:11:58.703292 53 values clariffication
+-3 465 65735 4294967298 72.3 22.58 false bob underhill 2013-03-01 09:11:58.703176 81 joggying
+-3 467 65575 4294967437 81.64 23.53 true tom hernandez 2013-03-01 09:11:58.703188 33 study skills
+-3 469 65577 4294967451 88.78 32.96 true katie ichabod 2013-03-01 09:11:58.703139 69 undecided
+-3 469 65698 4294967357 47.51 49.22 true david falkner 2013-03-01 09:11:58.703305 78 joggying
+-3 469 65752 4294967350 55.41 32.11 true oscar johnson 2013-03-01 09:11:58.70311 47 philosophy
+-3 477 65785 4294967464 97.51 10.84 true tom hernandez 2013-03-01 09:11:58.703108 7 history
+-3 485 65661 4294967441 26.21 16.19 false alice xylophone 2013-03-01 09:11:58.703129 97 topology
+-3 485 65669 4294967428 21.34 13.07 false priscilla zipper 2013-03-01 09:11:58.703321 28 quiet hour
+-3 485 65684 4294967483 11.83 8.04 false david garcia 2013-03-01 09:11:58.70319 63 wind surfing
+-3 493 65662 4294967482 28.75 30.21 false xavier garcia 2013-03-01 09:11:58.703194 5 education
+-3 494 65589 4294967369 48.09 14.4 false jessica johnson 2013-03-01 09:11:58.703319 79 nap time
+-3 498 65751 4294967331 80.65 0.28 true gabriella brown 2013-03-01 09:11:58.703288 61 opthamology
+-3 500 65704 4294967480 2.26 28.79 true mike polk 2013-03-01 09:11:58.70319 4 nap time
+-3 505 65565 4294967407 68.73 4.65 true holly nixon 2013-03-01 09:11:58.703262 15 debate
+-3 507 65671 4294967305 60.28 41.5 false quinn polk 2013-03-01 09:11:58.703244 77 industrial engineering
+-3 507 65728 4294967525 81.95 47.14 true rachel davidson 2013-03-01 09:11:58.703316 31 study skills
+PREHOOK: query: select t, f, s from text_llap2 order by t, s, f limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+POSTHOOK: query: select t, f, s from text_llap2 order by t, s, f limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+-2 1.79 david falkner
+-2 16.98 alice nixon
+-2 19.69 bob xylophone
+-2 20.94 bob van buren
+-2 25.48 david laertes
+-2 26.68 alice underhill
+-2 48.3 alice xylophone
+-2 6.49 alice underhill
+-2 67.12 bob ovid
+-2 76.54 calvin xylophone
+-2 87.78 alice carson
+-2 9.81 bob king
+-2 94.57 david miller
+-2 99.45 bob falkner
+-3 0.56 rachel thompson
+-3 0.83 oscar davidson
+-3 10.26 ulysses quirinius
+-3 11.07 oscar miller
+-3 11.83 david garcia
+-3 12.12 zach thompson
+-3 13.67 wendy xylophone
+-3 14.23 ethan underhill
+-3 17.6 bob underhill
+-3 18.27 zach young
+-3 18.44 alice falkner
+-3 19.94 quinn ellison
+-3 2.26 mike polk
+-3 2.48 alice king
+-3 20.95 bob polk
+-3 21.34 priscilla zipper
+-3 22.0 priscilla laertes
+-3 22.34 ulysses steinbeck
+-3 22.35 bob johnson
+-3 23.22 nick robinson
+-3 25.5 wendy quirinius
+-3 25.67 ulysses robinson
+-3 26.21 alice xylophone
+-3 28.75 xavier garcia
+-3 28.78 xavier ovid
+-3 29.06 calvin ichabod
+-3 3.82 luke garcia
+-3 34.6 ethan steinbeck
+-3 38.05 david carson
+-3 38.22 sarah xylophone
+-3 43.4 victor allen
+-3 43.69 victor hernandez
+-3 43.73 oscar garcia
+-3 45.24 irene ellison
+-3 47.51 david falkner
+-3 48.09 jessica johnson
+-3 52.13 victor thompson
+-3 52.3 calvin white
+-3 55.41 oscar johnson
+-3 55.59 luke king
+-3 56.05 tom polk
+-3 56.2 luke ichabod
+-3 58.81 sarah miller
+-3 59.07 bob falkner
+-3 59.34 ulysses ellison
+-3 6.8 luke young
+-3 6.83 bob van buren
+-3 60.28 quinn polk
+-3 61.81 victor robinson
+-3 62.12 mike white
+-3 62.39 victor xylophone
+-3 62.77 alice king
+-3 62.81 jessica white
+-3 63.19 priscilla quirinius
+-3 63.49 holly steinbeck
+-3 64.29 irene young
+-3 65.5 yuri xylophone
+-3 68.73 holly nixon
+-3 7.1 ulysses carson
+-3 71.78 wendy robinson
+-3 71.97 alice davidson
+-3 72.26 nick nixon
+-3 72.3 bob underhill
+-3 73.83 bob ichabod
+-3 74.92 mike king
+-3 76.31 alice allen
+-3 8.37 bob ellison
+-3 80.65 gabriella brown
+-3 81.39 tom steinbeck
+-3 81.64 tom hernandez
+-3 81.68 david hernandez
+-3 81.95 rachel davidson
+-3 81.97 mike xylophone
+-3 82.74 holly polk
+-3 83.39 mike allen
+-3 84.39 sarah robinson
+-3 84.75 holly thompson
+-3 85.96 jessica miller
+-3 88.78 katie ichabod
+-3 9.86 holly underhill
+-3 90.3 irene underhill
+-3 92.71 jessica king
+-3 94.22 oscar johnson
+-3 96.78 fred ellison
+-3 97.51 tom hernandez
+-3 97.88 sarah zipper
+PREHOOK: query: select ctinyint, cstring1, cboolean2 from text_llap100 order by ctinyint, cstring1, cboolean2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, cstring1, cboolean2 from text_llap100 order by ctinyint, cstring1, cboolean2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+-1 cvLH6Eat2yFsyy7p NULL
+-11 cvLH6Eat2yFsyy7p NULL
+-11 cvLH6Eat2yFsyy7p NULL
+-11 cvLH6Eat2yFsyy7p NULL
+-12 cvLH6Eat2yFsyy7p NULL
+-13 cvLH6Eat2yFsyy7p NULL
+-16 cvLH6Eat2yFsyy7p NULL
+-16 cvLH6Eat2yFsyy7p NULL
+-19 cvLH6Eat2yFsyy7p NULL
+-21 cvLH6Eat2yFsyy7p NULL
+-21 cvLH6Eat2yFsyy7p NULL
+-22 cvLH6Eat2yFsyy7p NULL
+-22 cvLH6Eat2yFsyy7p NULL
+-22 cvLH6Eat2yFsyy7p NULL
+-23 cvLH6Eat2yFsyy7p NULL
+-23 cvLH6Eat2yFsyy7p NULL
+-23 cvLH6Eat2yFsyy7p NULL
+-24 cvLH6Eat2yFsyy7p NULL
+-28 cvLH6Eat2yFsyy7p NULL
+-28 cvLH6Eat2yFsyy7p NULL
+-30 cvLH6Eat2yFsyy7p NULL
+-32 cvLH6Eat2yFsyy7p NULL
+-33 cvLH6Eat2yFsyy7p NULL
+-33 cvLH6Eat2yFsyy7p NULL
+-34 cvLH6Eat2yFsyy7p NULL
+-34 cvLH6Eat2yFsyy7p NULL
+-36 cvLH6Eat2yFsyy7p NULL
+-37 cvLH6Eat2yFsyy7p NULL
+-4 cvLH6Eat2yFsyy7p NULL
+-4 cvLH6Eat2yFsyy7p NULL
+-40 cvLH6Eat2yFsyy7p NULL
+-43 cvLH6Eat2yFsyy7p NULL
+-44 cvLH6Eat2yFsyy7p NULL
+-45 cvLH6Eat2yFsyy7p NULL
+-45 cvLH6Eat2yFsyy7p NULL
+-47 cvLH6Eat2yFsyy7p NULL
+-48 cvLH6Eat2yFsyy7p NULL
+-48 cvLH6Eat2yFsyy7p NULL
+-5 cvLH6Eat2yFsyy7p NULL
+-5 cvLH6Eat2yFsyy7p NULL
+-5 cvLH6Eat2yFsyy7p NULL
+-50 cvLH6Eat2yFsyy7p NULL
+-51 cvLH6Eat2yFsyy7p NULL
+-53 cvLH6Eat2yFsyy7p NULL
+-54 cvLH6Eat2yFsyy7p NULL
+-55 cvLH6Eat2yFsyy7p NULL
+-55 cvLH6Eat2yFsyy7p NULL
+-56 cvLH6Eat2yFsyy7p NULL
+-56 cvLH6Eat2yFsyy7p NULL
+-57 cvLH6Eat2yFsyy7p NULL
+-59 cvLH6Eat2yFsyy7p NULL
+-62 cvLH6Eat2yFsyy7p NULL
+-7 cvLH6Eat2yFsyy7p NULL
+0 cvLH6Eat2yFsyy7p NULL
+0 cvLH6Eat2yFsyy7p NULL
+10 cvLH6Eat2yFsyy7p NULL
+13 cvLH6Eat2yFsyy7p NULL
+16 cvLH6Eat2yFsyy7p NULL
+18 cvLH6Eat2yFsyy7p NULL
+19 cvLH6Eat2yFsyy7p NULL
+2 cvLH6Eat2yFsyy7p NULL
+21 cvLH6Eat2yFsyy7p NULL
+24 cvLH6Eat2yFsyy7p NULL
+24 cvLH6Eat2yFsyy7p NULL
+26 cvLH6Eat2yFsyy7p NULL
+27 cvLH6Eat2yFsyy7p NULL
+27 cvLH6Eat2yFsyy7p NULL
+28 cvLH6Eat2yFsyy7p NULL
+29 cvLH6Eat2yFsyy7p NULL
+29 cvLH6Eat2yFsyy7p NULL
+30 cvLH6Eat2yFsyy7p NULL
+31 cvLH6Eat2yFsyy7p NULL
+31 cvLH6Eat2yFsyy7p NULL
+34 cvLH6Eat2yFsyy7p NULL
+34 cvLH6Eat2yFsyy7p NULL
+36 cvLH6Eat2yFsyy7p NULL
+36 cvLH6Eat2yFsyy7p NULL
+38 cvLH6Eat2yFsyy7p NULL
+38 cvLH6Eat2yFsyy7p NULL
+38 cvLH6Eat2yFsyy7p NULL
+39 cvLH6Eat2yFsyy7p NULL
+4 cvLH6Eat2yFsyy7p NULL
+40 cvLH6Eat2yFsyy7p NULL
+40 cvLH6Eat2yFsyy7p NULL
+41 cvLH6Eat2yFsyy7p NULL
+43 cvLH6Eat2yFsyy7p NULL
+46 cvLH6Eat2yFsyy7p NULL
+5 cvLH6Eat2yFsyy7p NULL
+51 cvLH6Eat2yFsyy7p NULL
+51 cvLH6Eat2yFsyy7p NULL
+53 cvLH6Eat2yFsyy7p NULL
+53 cvLH6Eat2yFsyy7p NULL
+61 cvLH6Eat2yFsyy7p NULL
+61 cvLH6Eat2yFsyy7p NULL
+61 cvLH6Eat2yFsyy7p NULL
+62 cvLH6Eat2yFsyy7p NULL
+8 cvLH6Eat2yFsyy7p NULL
+9 cvLH6Eat2yFsyy7p NULL
+NULL cvLH6Eat2yFsyy7p NULL
+NULL cvLH6Eat2yFsyy7p NULL
+PREHOOK: query: select * from text_llap100 order by cint, cstring1, cstring2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+POSTHOOK: query: select * from text_llap100 order by cint, cstring1, cstring2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+-1 -75 528534767 NULL -1.389 -863.257 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:49.331 1969-12-31 16:00:07.585 true NULL
+-11 -15431 528534767 NULL -11.0 -15431.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.176 1969-12-31 16:00:07.787 true NULL
+-11 7476 528534767 NULL -11.0 7476.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.551 1969-12-31 15:59:57.567 true NULL
+-11 9472 528534767 NULL -11.0 9472.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.917 1969-12-31 16:00:03.716 true NULL
+-12 -2013 528534767 NULL -12.0 -2013.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:14.907 1969-12-31 15:59:58.789 true NULL
+-13 -13372 528534767 NULL -13.0 -13372.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:08.499 1969-12-31 15:59:48.221 true NULL
+-16 -6922 528534767 NULL -16.0 -6922.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:08.402 1969-12-31 15:59:50.561 true NULL
+-16 -7964 528534767 NULL -16.0 -7964.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:08.035 1969-12-31 16:00:12.464 true NULL
+-19 1206 528534767 NULL -19.0 1206.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.587 1969-12-31 16:00:08.381 true NULL
+-21 -7183 528534767 NULL -21.0 -7183.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:45.035 1969-12-31 16:00:06.182 true NULL
+-21 3168 528534767 NULL -21.0 3168.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:56.834 1969-12-31 16:00:13.331 true NULL
+-22 3856 528534767 NULL -22.0 3856.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:48.508 1969-12-31 15:59:54.534 true NULL
+-22 77 528534767 NULL -22.0 77.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:45.928 1969-12-31 15:59:43.621 true NULL
+-22 8499 528534767 NULL -22.0 8499.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.626 1969-12-31 16:00:10.923 true NULL
+-23 -10154 528534767 NULL -23.0 -10154.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.088 1969-12-31 15:59:56.086 true NULL
+-23 13026 528534767 NULL -23.0 13026.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:14.625 1969-12-31 16:00:10.77 true NULL
+-23 4587 528534767 NULL -23.0 4587.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:49.732 1969-12-31 15:59:48.52 true NULL
+-24 163 528534767 NULL -24.0 163.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.51 1969-12-31 16:00:04.014 true NULL
+-28 -15813 528534767 NULL -28.0 -15813.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.787 1969-12-31 16:00:01.546 true NULL
+-28 6453 528534767 NULL -28.0 6453.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:01.475 1969-12-31 16:00:07.828 true NULL
+-30 834 528534767 NULL -30.0 834.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:14.072 1969-12-31 16:00:03.004 true NULL
+-32 11242 528534767 NULL -32.0 11242.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:49.091 1969-12-31 15:59:55.681 true NULL
+-33 14072 528534767 NULL -33.0 14072.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:03.168 1969-12-31 15:59:55.836 true NULL
+-33 7350 528534767 NULL -33.0 7350.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.952 1969-12-31 15:59:48.183 true NULL
+-34 15007 528534767 NULL -34.0 15007.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:50.434 1969-12-31 16:00:13.352 true NULL
+-34 4181 528534767 NULL -34.0 4181.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:06.557 1969-12-31 16:00:04.869 true NULL
+-36 1639 528534767 NULL -36.0 1639.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:00.186 1969-12-31 16:00:13.098 true NULL
+-37 -12472 528534767 NULL -37.0 -12472.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:13.3 1969-12-31 15:59:55.998 true NULL
+-4 -1027 528534767 NULL -4.0 -1027.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.628 1969-12-31 16:00:11.413 true NULL
+-4 2617 528534767 NULL -4.0 2617.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.21 1969-12-31 15:59:44.733 true NULL
+-40 -4463 528534767 NULL -40.0 -4463.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.647 1969-12-31 15:59:46.254 true NULL
+-43 486 528534767 NULL -43.0 486.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:01.345 1969-12-31 15:59:52.667 true NULL
+-44 -1299 528534767 NULL -44.0 -1299.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:00.163 1969-12-31 15:59:47.687 true NULL
+-45 -14072 528534767 NULL -45.0 -14072.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:45.621 1969-12-31 15:59:45.914 true NULL
+-45 5521 528534767 NULL -45.0 5521.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:00.01 1969-12-31 15:59:48.553 true NULL
+-47 -2468 528534767 NULL -47.0 -2468.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:48.68 1969-12-31 16:00:02.94 true NULL
+-48 -7735 528534767 NULL -48.0 -7735.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:09.472 1969-12-31 16:00:00.8 true NULL
+-48 13300 528534767 NULL -48.0 13300.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:00.077 1969-12-31 15:59:45.827 true NULL
+-5 -13229 528534767 NULL -5.0 -13229.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:00.834 1969-12-31 16:00:00.388 true NULL
+-5 -14379 528534767 NULL -5.0 -14379.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:00.037 1969-12-31 15:59:49.141 true NULL
+-5 12422 528534767 NULL -5.0 12422.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.745 1969-12-31 15:59:48.802 true NULL
+-50 -13326 528534767 NULL -50.0 -13326.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.674 1969-12-31 16:00:08.875 true NULL
+-51 -12083 528534767 NULL -51.0 -12083.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:13.026 1969-12-31 16:00:02.52 true NULL
+-53 -3419 528534767 NULL -53.0 -3419.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:46.771 1969-12-31 15:59:53.744 true NULL
+-54 -10268 528534767 NULL -54.0 -10268.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:53.417 1969-12-31 16:00:00.687 true NULL
+-55 -7353 528534767 NULL -55.0 -7353.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.941 1969-12-31 15:59:54.268 true NULL
+-55 -7449 528534767 NULL -55.0 -7449.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:49.846 1969-12-31 15:59:55.75 true NULL
+-56 8353 528534767 NULL -56.0 8353.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:11.242 1969-12-31 15:59:46.526 true NULL
+-56 8402 528534767 NULL -56.0 8402.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.01 1969-12-31 16:00:05.146 true NULL
+-57 -11492 528534767 NULL -57.0 -11492.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:45.261 1969-12-31 16:00:05.306 true NULL
+-59 10688 528534767 NULL -59.0 10688.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.746 1969-12-31 16:00:15.489 true NULL
+-62 10 528534767 NULL -62.0 10.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.265 1969-12-31 15:59:56.584 true NULL
+-7 2541 528534767 NULL -7.0 2541.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:08.353 1969-12-31 15:59:57.374 true NULL
+0 -3166 528534767 NULL 0.0 -3166.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:10.688 1969-12-31 16:00:01.385 true NULL
+0 15626 528534767 NULL 0.0 15626.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:09.566 1969-12-31 16:00:15.217 true NULL
+10 9366 528534767 NULL 10.0 9366.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:01.358 1969-12-31 15:59:50.592 true NULL
+13 1358 528534767 NULL 13.0 1358.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:06.453 1969-12-31 16:00:00.423 true NULL
+16 5780 528534767 NULL 16.0 5780.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.451 1969-12-31 16:00:12.752 true NULL
+18 -3045 528534767 NULL 18.0 -3045.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.829 1969-12-31 16:00:05.045 true NULL
+19 7952 528534767 NULL 19.0 7952.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:12.161 1969-12-31 16:00:00.95 true NULL
+2 1345 528534767 NULL 2.0 1345.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.333 1969-12-31 16:00:00.517 true NULL
+21 11737 528534767 NULL 21.0 11737.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.537 1969-12-31 15:59:45.022 true NULL
+24 -4812 528534767 NULL 24.0 -4812.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.86 1969-12-31 15:59:55 true NULL
+24 4432 528534767 NULL 24.0 4432.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:02.541 1969-12-31 16:00:10.895 true NULL
+26 3961 528534767 NULL 26.0 3961.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:57.987 1969-12-31 15:59:52.232 true NULL
+27 -14965 528534767 NULL 27.0 -14965.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:12.422 1969-12-31 16:00:09.517 true NULL
+27 -7824 528534767 NULL 27.0 -7824.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.963 1969-12-31 15:59:56.474 true NULL
+28 8035 528534767 NULL 28.0 8035.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:03.856 1969-12-31 15:59:55.95 true NULL
+29 -1990 528534767 NULL 29.0 -1990.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:06.958 1969-12-31 15:59:52.902 true NULL
+29 7021 528534767 NULL 29.0 7021.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:15.007 1969-12-31 16:00:15.148 true NULL
+30 -814 528534767 NULL 30.0 -814.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:56.955 1969-12-31 16:00:11.799 true NULL
+31 -9566 528534767 NULL 31.0 -9566.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.187 1969-12-31 16:00:06.961 true NULL
+31 4963 528534767 NULL 31.0 4963.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.021 1969-12-31 16:00:02.997 true NULL
+34 -15059 528534767 NULL 34.0 -15059.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:01.639 1969-12-31 16:00:13.206 true NULL
+34 -4255 528534767 NULL 34.0 -4255.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:56.581 1969-12-31 15:59:57.88 true NULL
+36 -15912 528534767 NULL 36.0 -15912.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.432 1969-12-31 16:00:04.376 true NULL
+36 14907 528534767 NULL 36.0 14907.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:47.528 1969-12-31 15:59:47.206 true NULL
+38 -11320 528534767 NULL 38.0 -11320.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:09.169 1969-12-31 16:00:03.822 true NULL
+38 -4667 528534767 NULL 38.0 -4667.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:09.366 1969-12-31 15:59:52.334 true NULL
+38 -6583 528534767 NULL 38.0 -6583.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:53.078 1969-12-31 16:00:06.722 true NULL
+39 -10909 528534767 NULL 39.0 -10909.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.276 1969-12-31 16:00:12.738 true NULL
+4 -14739 528534767 NULL 4.0 -14739.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:55.188 1969-12-31 16:00:15.26 true NULL
+40 -1724 528534767 NULL 40.0 -1724.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:05.521 1969-12-31 15:59:57.835 true NULL
+40 -7984 528534767 NULL 40.0 -7984.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:01.206 1969-12-31 16:00:02.59 true NULL
+41 37 528534767 NULL 41.0 37.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.817 1969-12-31 15:59:53.672 true NULL
+43 1475 528534767 NULL 43.0 1475.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:56.988 1969-12-31 16:00:03.442 true NULL
+46 6958 528534767 NULL 46.0 6958.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.036 1969-12-31 16:00:10.191 true NULL
+5 14625 528534767 NULL 5.0 14625.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:05.78 1969-12-31 16:00:15.34 true NULL
+51 -15790 528534767 NULL 51.0 -15790.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:49.871 1969-12-31 15:59:57.821 true NULL
+51 -4490 528534767 NULL 51.0 -4490.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.476 1969-12-31 15:59:49.318 true NULL
+53 -10129 528534767 NULL 53.0 -10129.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:04.181 1969-12-31 16:00:08.061 true NULL
+53 -12171 528534767 NULL 53.0 -12171.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:07.35 1969-12-31 15:59:57.549 true NULL
+61 -1254 528534767 NULL 61.0 -1254.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:11.737 1969-12-31 16:00:12.004 true NULL
+61 -15549 528534767 NULL 61.0 -15549.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:44.569 1969-12-31 15:59:51.665 true NULL
+61 12161 528534767 NULL 61.0 12161.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:02.617 1969-12-31 16:00:10.536 true NULL
+62 6557 528534767 NULL 62.0 6557.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:52.016 1969-12-31 16:00:00.367 true NULL
+8 7860 528534767 NULL 8.0 7860.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 15:59:58.701 1969-12-31 16:00:01.97 true NULL
+9 9169 528534767 NULL 9.0 9169.0 cvLH6Eat2yFsyy7p NULL 1969-12-31 16:00:03.961 1969-12-31 16:00:14.126 true NULL
+NULL -3012 528534767 NULL NULL -3012.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:03.756 true NULL
+NULL -4213 528534767 NULL NULL -4213.0 cvLH6Eat2yFsyy7p NULL NULL 1969-12-31 16:00:13.589 true NULL
+PREHOOK: query: select csmallint, cstring1, cboolean2 from text_llap100 order by csmallint, cstring1, cboolean2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+POSTHOOK: query: select csmallint, cstring1, cboolean2 from text_llap100 order by csmallint, cstring1, cboolean2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+-10129 cvLH6Eat2yFsyy7p NULL
+-10154 cvLH6Eat2yFsyy7p NULL
+-10268 cvLH6Eat2yFsyy7p NULL
+-1027 cvLH6Eat2yFsyy7p NULL
+-10909 cvLH6Eat2yFsyy7p NULL
+-11320 cvLH6Eat2yFsyy7p NULL
+-11492 cvLH6Eat2yFsyy7p NULL
+-12083 cvLH6Eat2yFsyy7p NULL
+-12171 cvLH6Eat2yFsyy7p NULL
+-12472 cvLH6Eat2yFsyy7p NULL
+-1254 cvLH6Eat2yFsyy7p NULL
+-1299 cvLH6Eat2yFsyy7p NULL
+-13229 cvLH6Eat2yFsyy7p NULL
+-13326 cvLH6Eat2yFsyy7p NULL
+-13372 cvLH6Eat2yFsyy7p NULL
+-14072 cvLH6Eat2yFsyy7p NULL
+-14379 cvLH6Eat2yFsyy7p NULL
+-14739 cvLH6Eat2yFsyy7p NULL
+-14965 cvLH6Eat2yFsyy7p NULL
+-15059 cvLH6Eat2yFsyy7p NULL
+-15431 cvLH6Eat2yFsyy7p NULL
+-15549 cvLH6Eat2yFsyy7p NULL
+-15790 cvLH6Eat2yFsyy7p NULL
+-15813 cvLH6Eat2yFsyy7p NULL
+-15912 cvLH6Eat2yFsyy7p NULL
+-1724 cvLH6Eat2yFsyy7p NULL
+-1990 cvLH6Eat2yFsyy7p NULL
+-2013 cvLH6Eat2yFsyy7p NULL
+-2468 cvLH6Eat2yFsyy7p NULL
+-3012 cvLH6Eat2yFsyy7p NULL
+-3045 cvLH6Eat2yFsyy7p NULL
+-3166 cvLH6Eat2yFsyy7p NULL
+-3419 cvLH6Eat2yFsyy7p NULL
+-4213 cvLH6Eat2yFsyy7p NULL
+-4255 cvLH6Eat2yFsyy7p NULL
+-4463 cvLH6Eat2yFsyy7p NULL
+-4490 cvLH6Eat2yFsyy7p NULL
+-4667 cvLH6Eat2yFsyy7p NULL
+-4812 cvLH6Eat2yFsyy7p NULL
+-6583 cvLH6Eat2yFsyy7p NULL
+-6922 cvLH6Eat2yFsyy7p NULL
+-7183 cvLH6Eat2yFsyy7p NULL
+-7353 cvLH6Eat2yFsyy7p NULL
+-7449 cvLH6Eat2yFsyy7p NULL
+-75 cvLH6Eat2yFsyy7p NULL
+-7735 cvLH6Eat2yFsyy7p NULL
+-7824 cvLH6Eat2yFsyy7p NULL
+-7964 cvLH6Eat2yFsyy7p NULL
+-7984 cvLH6Eat2yFsyy7p NULL
+-814 cvLH6Eat2yFsyy7p NULL
+-9566 cvLH6Eat2yFsyy7p NULL
+10 cvLH6Eat2yFsyy7p NULL
+10688 cvLH6Eat2yFsyy7p NULL
+11242 cvLH6Eat2yFsyy7p NULL
+11737 cvLH6Eat2yFsyy7p NULL
+1206 cvLH6Eat2yFsyy7p NULL
+12161 cvLH6Eat2yFsyy7p NULL
+12422 cvLH6Eat2yFsyy7p NULL
+13026 cvLH6Eat2yFsyy7p NULL
+13300 cvLH6Eat2yFsyy7p NULL
+1345 cvLH6Eat2yFsyy7p NULL
+1358 cvLH6Eat2yFsyy7p NULL
+14072 cvLH6Eat2yFsyy7p NULL
+14625 cvLH6Eat2yFsyy7p NULL
+1475 cvLH6Eat2yFsyy7p NULL
+14907 cvLH6Eat2yFsyy7p NULL
+15007 cvLH6Eat2yFsyy7p NULL
+15626 cvLH6Eat2yFsyy7p NULL
+163 cvLH6Eat2yFsyy7p NULL
+1639 cvLH6Eat2yFsyy7p NULL
+2541 cvLH6Eat2yFsyy7p NULL
+2617 cvLH6Eat2yFsyy7p NULL
+3168 cvLH6Eat2yFsyy7p NULL
+37 cvLH6Eat2yFsyy7p NULL
+3856 cvLH6Eat2yFsyy7p NULL
+3961 cvLH6Eat2yFsyy7p NULL
+4181 cvLH6Eat2yFsyy7p NULL
+4432 cvLH6Eat2yFsyy7p NULL
+4587 cvLH6Eat2yFsyy7p NULL
+486 cvLH6Eat2yFsyy7p NULL
+4963 cvLH6Eat2yFsyy7p NULL
+5521 cvLH6Eat2yFsyy7p NULL
+5780 cvLH6Eat2yFsyy7p NULL
+6453 cvLH6Eat2yFsyy7p NULL
+6557 cvLH6Eat2yFsyy7p NULL
+6958 cvLH6Eat2yFsyy7p NULL
+7021 cvLH6Eat2yFsyy7p NULL
+7350 cvLH6Eat2yFsyy7p NULL
+7476 cvLH6Eat2yFsyy7p NULL
+77 cvLH6Eat2yFsyy7p NULL
+7860 cvLH6Eat2yFsyy7p NULL
+7952 cvLH6Eat2yFsyy7p NULL
+8035 cvLH6Eat2yFsyy7p NULL
+834 cvLH6Eat2yFsyy7p NULL
+8353 cvLH6Eat2yFsyy7p NULL
+8402 cvLH6Eat2yFsyy7p NULL
+8499 cvLH6Eat2yFsyy7p NULL
+9169 cvLH6Eat2yFsyy7p NULL
+9366 cvLH6Eat2yFsyy7p NULL
+9472 cvLH6Eat2yFsyy7p NULL
+PREHOOK: query: select t, s, ts from text_llap2 order by t, s, ts limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+POSTHOOK: query: select t, s, ts from text_llap2 order by t, s, ts limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap2
+#### A masked pattern was here ####
+-2 alice carson 2013-03-01 09:11:58.703074
+-2 alice nixon 2013-03-01 09:11:58.703321
+-2 alice underhill 2013-03-01 09:11:58.703122
+-2 alice underhill 2013-03-01 09:11:58.703127
+-2 alice xylophone 2013-03-01 09:11:58.703105
+-2 bob falkner 2013-03-01 09:11:58.703071
+-2 bob king 2013-03-01 09:11:58.703236
+-2 bob ovid 2013-03-01 09:11:58.703285
+-2 bob van buren 2013-03-01 09:11:58.703218
+-2 bob xylophone 2013-03-01 09:11:58.703219
+-2 calvin xylophone 2013-03-01 09:11:58.703083
+-2 david falkner 2013-03-01 09:11:58.703254
+-2 david laertes 2013-03-01 09:11:58.703076
+-2 david miller 2013-03-01 09:11:58.703238
+-3 alice allen 2013-03-01 09:11:58.703323
+-3 alice davidson 2013-03-01 09:11:58.703226
+-3 alice falkner 2013-03-01 09:11:58.703304
+-3 alice king 2013-03-01 09:11:58.70314
+-3 alice king 2013-03-01 09:11:58.703247
+-3 alice xylophone 2013-03-01 09:11:58.703129
+-3 bob ellison 2013-03-01 09:11:58.703261
+-3 bob falkner 2013-03-01 09:11:58.70328
+-3 bob ichabod 2013-03-01 09:11:58.70324
+-3 bob johnson 2013-03-01 09:11:58.703204
+-3 bob polk 2013-03-01 09:11:58.703128
+-3 bob underhill 2013-03-01 09:11:58.703176
+-3 bob underhill 2013-03-01 09:11:58.703188
+-3 bob van buren 2013-03-01 09:11:58.703199
+-3 calvin ichabod 2013-03-01 09:11:58.703213
+-3 calvin white 2013-03-01 09:11:58.703295
+-3 david carson 2013-03-01 09:11:58.703136
+-3 david falkner 2013-03-01 09:11:58.703305
+-3 david garcia 2013-03-01 09:11:58.70319
+-3 david hernandez 2013-03-01 09:11:58.703252
+-3 ethan steinbeck 2013-03-01 09:11:58.703079
+-3 ethan underhill 2013-03-01 09:11:58.703138
+-3 fred ellison 2013-03-01 09:11:58.703233
+-3 gabriella brown 2013-03-01 09:11:58.703288
+-3 holly nixon 2013-03-01 09:11:58.703262
+-3 holly polk 2013-03-01 09:11:58.703273
+-3 holly steinbeck 2013-03-01 09:11:58.703242
+-3 holly thompson 2013-03-01 09:11:58.703073
+-3 holly underhill 2013-03-01 09:11:58.703219
+-3 irene ellison 2013-03-01 09:11:58.703092
+-3 irene underhill 2013-03-01 09:11:58.703298
+-3 irene young 2013-03-01 09:11:58.703084
+-3 jessica johnson 2013-03-01 09:11:58.703319
+-3 jessica king 2013-03-01 09:11:58.703279
+-3 jessica miller 2013-03-01 09:11:58.703245
+-3 jessica white 2013-03-01 09:11:58.703199
+-3 katie ichabod 2013-03-01 09:11:58.703139
+-3 luke garcia 2013-03-01 09:11:58.703076
+-3 luke ichabod 2013-03-01 09:11:58.703294
+-3 luke king 2013-03-01 09:11:58.703207
+-3 luke young 2013-03-01 09:11:58.703182
+-3 mike allen 2013-03-01 09:11:58.703292
+-3 mike king 2013-03-01 09:11:58.703214
+-3 mike polk 2013-03-01 09:11:58.70319
+-3 mike white 2013-03-01 09:11:58.703087
+-3 mike xylophone 2013-03-01 09:11:58.703308
+-3 nick nixon 2013-03-01 09:11:58.703083
+-3 nick robinson 2013-03-01 09:11:58.703147
+-3 oscar davidson 2013-03-01 09:11:58.703071
+-3 oscar garcia 2013-03-01 09:11:58.703282
+-3 oscar johnson 2013-03-01 09:11:58.70311
+-3 oscar johnson 2013-03-01 09:11:58.703133
+-3 oscar miller 2013-03-01 09:11:58.70332
+-3 priscilla laertes 2013-03-01 09:11:58.70325
+-3 priscilla quirinius 2013-03-01 09:11:58.703228
+-3 priscilla zipper 2013-03-01 09:11:58.703321
+-3 quinn ellison 2013-03-01 09:11:58.703232
+-3 quinn polk 2013-03-01 09:11:58.703244
+-3 rachel davidson 2013-03-01 09:11:58.703316
+-3 rachel thompson 2013-03-01 09:11:58.703276
+-3 sarah miller 2013-03-01 09:11:58.70316
+-3 sarah robinson 2013-03-01 09:11:58.703288
+-3 sarah xylophone 2013-03-01 09:11:58.703112
+-3 sarah zipper 2013-03-01 09:11:58.703289
+-3 tom hernandez 2013-03-01 09:11:58.703108
+-3 tom hernandez 2013-03-01 09:11:58.703188
+-3 tom polk 2013-03-01 09:11:58.703217
+-3 tom steinbeck 2013-03-01 09:11:58.703251
+-3 ulysses carson 2013-03-01 09:11:58.703253
+-3 ulysses ellison 2013-03-01 09:11:58.703197
+-3 ulysses quirinius 2013-03-01 09:11:58.703189
+-3 ulysses robinson 2013-03-01 09:11:58.703227
+-3 ulysses steinbeck 2013-03-01 09:11:58.703259
+-3 victor allen 2013-03-01 09:11:58.703155
+-3 victor hernandez 2013-03-01 09:11:58.703176
+-3 victor robinson 2013-03-01 09:11:58.703305
+-3 victor thompson 2013-03-01 09:11:58.703299
+-3 victor xylophone 2013-03-01 09:11:58.703135
+-3 wendy quirinius 2013-03-01 09:11:58.703266
+-3 wendy robinson 2013-03-01 09:11:58.703294
+-3 wendy xylophone 2013-03-01 09:11:58.703191
+-3 xavier garcia 2013-03-01 09:11:58.703194
+-3 xavier ovid 2013-03-01 09:11:58.703148
+-3 yuri xylophone 2013-03-01 09:11:58.703258
+-3 zach thompson 2013-03-01 09:11:58.703252
+-3 zach young 2013-03-01 09:11:58.703191
+PREHOOK: query: select csmallint, cstring1, cboolean2 from text_llap100 order by csmallint, cstring1, cboolean2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+POSTHOOK: query: select csmallint, cstring1, cboolean2 from text_llap100 order by csmallint, cstring1, cboolean2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@text_llap100
+#### A masked pattern was here ####
+-10129 cvLH6Eat2yFsyy7p NULL
+-10154 cvLH6Eat2yFsyy7p NULL
+-10268 cvLH6Eat2yFsyy7p NULL
+-1027 cvLH6Eat2yFsyy7p NULL
+-10909 cvLH6Eat2yFsyy7p NULL
+-11320 cvLH6Eat2yFsyy7p NULL
+-11492 cvLH6Eat2yFsyy7p NULL
+-12083 cvLH6Eat2yFsyy7p NULL
+-12171 cvLH6Eat2yFsyy7p NULL
+-12472 cvLH6Eat2yFsyy7p NULL
+-1254 cvLH6Eat2yFsyy7p NULL
+-1299 cvLH6Eat2yFsyy7p NULL
+-13229 cvLH6Eat2yFsyy7p NULL
+-13326 cvLH6Eat2yFsyy7p NULL
+-13372 cvLH6Eat2yFsyy7p NULL
+-14072 cvLH6Eat2yFsyy7p NULL
+-14379 cvLH6Eat2yFsyy7p NULL
+-14739 cvLH6Eat2yFsyy7p NULL
+-14965 cvLH6Eat2yFsyy7p NULL
+-15059 cvLH6Eat2yFsyy7p NULL
+-15431 cvLH6Eat2yFsyy7p NULL
+-15549 cvLH6Eat2yFsyy7p NULL
+-15790 cvLH6Eat2yFsyy7p NULL
+-15813 cvLH6Eat2yFsyy7p NULL
+-15912 cvLH6Eat2yFsyy7p NULL
+-1724 cvLH6Eat2yFsyy7p NULL
+-1990 cvLH6Eat2yFsyy7p NULL
+-2013 cvLH6Eat2yFsyy7p NULL
+-2468 cvLH6Eat2yFsyy7p NULL
+-3012 cvLH6Eat2yFsyy7p NULL
+-3045 cvLH6Eat2yFsyy7p NULL
+-3166 cvLH6Eat2yFsyy7p NULL
+-3419 cvLH6Eat2yFsyy7p NULL
+-4213 cvLH6Eat2yFsyy7p NULL
+-4255 cvLH6Eat2yFsyy7p NULL
+-4463 cvLH6Eat2yFsyy7p NULL
+-4490 cvLH6Eat2yFsyy7p NULL
+-4667 cvLH6Eat2yFsyy7p NULL
+-4812 cvLH6Eat2yFsyy7p NULL
+-6583 cvLH6Eat2yFsyy7p NULL
+-6922 cvLH6Eat2yFsyy7p NULL
+-7183 cvLH6Eat2yFsyy7p NULL
+-7353 cvLH6Eat2yFsyy7p NULL
+-7449 cvLH6Eat2yFsyy7p NULL
+-75 cvLH6Eat2yFsyy7p NULL
+-7735 cvLH6Eat2yFsyy7p NULL
+-7824 cvLH6Eat2yFsyy7p NULL
+-7964 cvLH6Eat2yFsyy7p NULL
+-7984 cvLH6Eat2yFsyy7p NULL
+-814 cvLH6Eat2yFsyy7p NULL
+-9566 cvLH6Eat2yFsyy7p NULL
+10 cvLH6Eat2yFsyy7p NULL
+10688 cvLH6Eat2yFsyy7p NULL
+11242 cvLH6Eat2yFsyy7p NULL
+11737 cvLH6Eat2yFsyy7p NULL
+1206 cvLH6Eat2yFsyy7p NULL
+12161 cvLH6Eat2yFsyy7p NULL
+12422 cvLH6Eat2yFsyy7p NULL
+13026 cvLH6Eat2yFsyy7p NULL
+13300 cvLH6Eat2yFsyy7p NULL
+1345 cvLH6Eat2yFsyy7p NULL
+1358 cvLH6Eat2yFsyy7p NULL
+14072 cvLH6Eat2yFsyy7p NULL
+14625 cvLH6Eat2yFsyy7p NULL
+1475 cvLH6Eat2yFsyy7p NULL
+14907 cvLH6Eat2yFsyy7p NULL
+15007 cvLH6Eat2yFsyy7p NULL
+15626 cvLH6Eat2yFsyy7p NULL
+163 cvLH6Eat2yFsyy7p NULL
+1639 cvLH6Eat2yFsyy7p NULL
+2541 cvLH6Eat2yFsyy7p NULL
+2617 cvLH6Eat2yFsyy7p NULL
+3168 cvLH6Eat2yFsyy7p NULL
+37 cvLH6Eat2yFsyy7p NULL
+3856 cvLH6Eat2yFsyy7p NULL
+3961 cvLH6Eat2yFsyy7p NULL
+4181 cvLH6Eat2yFsyy7p NULL
+4432 cvLH6Eat2yFsyy7p NULL
+4587 cvLH6Eat2yFsyy7p NULL
+486 cvLH6Eat2yFsyy7p NULL
+4963 cvLH6Eat2yFsyy7p NULL
+5521 cvLH6Eat2yFsyy7p NULL
+5780 cvLH6Eat2yFsyy7p NULL
+6453 cvLH6Eat2yFsyy7p NULL
+6557 cvLH6Eat2yFsyy7p NULL
+6958 cvLH6Eat2yFsyy7p NULL
+7021 cvLH6Eat2yFsyy7p NULL
+7350 cvLH6Eat2yFsyy7p NULL
+7476 cvLH6Eat2yFsyy7p NULL
+77 cvLH6Eat2yFsyy7p NULL
+7860 cvLH6Eat2yFsyy7p NULL
+7952 cvLH6Eat2yFsyy7p NULL
+8035 cvLH6Eat2yFsyy7p NULL
+834 cvLH6Eat2yFsyy7p NULL
+8353 cvLH6Eat2yFsyy7p NULL
+8402 cvLH6Eat2yFsyy7p NULL
+8499 cvLH6Eat2yFsyy7p NULL
+9169 cvLH6Eat2yFsyy7p NULL
+9366 cvLH6Eat2yFsyy7p NULL
+9472 cvLH6Eat2yFsyy7p NULL
+PREHOOK: query: DROP TABLE text_llap
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@text_llap
+PREHOOK: Output: default@text_llap
+POSTHOOK: query: DROP TABLE text_llap
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@text_llap
+POSTHOOK: Output: default@text_llap
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_uncompressed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_uncompressed.q.out b/ql/src/test/results/clientpositive/llap/llap_uncompressed.q.out
new file mode 100644
index 0000000..6900cdb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/llap_uncompressed.q.out
@@ -0,0 +1,283 @@
+PREHOOK: query: DROP TABLE orc_llap_n0
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_llap_n0
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orc_llap_n0(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN,
+ cdecimal1 decimal(10,2),
+ cdecimal2 decimal(38,5))
+ STORED AS ORC tblproperties ("orc.compress"="NONE")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: CREATE TABLE orc_llap_n0(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN,
+ cdecimal1 decimal(10,2),
+ cdecimal2 decimal(38,5))
+ STORED AS ORC tblproperties ("orc.compress"="NONE")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_llap_n0
+PREHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@orc_llap_n0
+POSTHOOK: Lineage: orc_llap_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cdecimal1 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdecimal2 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: alter table orc_llap_n0 set tblproperties ("orc.compress"="NONE", 'orc.write.format'='UNSTABLE-PRE-2.0')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@orc_llap_n0
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: alter table orc_llap_n0 set tblproperties ("orc.compress"="NONE", 'orc.write.format'='UNSTABLE-PRE-2.0')
+POSTHOOK: type: ALTERTABLE_PROPERTIES
+POSTHOOK: Input: default@orc_llap_n0
+POSTHOOK: Output: default@orc_llap_n0
+PREHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@orc_llap_n0
+POSTHOOK: Lineage: orc_llap_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cdecimal1 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdecimal2 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: drop table llap_temp_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table llap_temp_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: explain
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: orc_llap_n0
+ filterExpr: ((cint > 10) and cbigint is not null) (type: boolean)
+ Filter Operator
+ predicate: ((cint > 10) and cbigint is not null) (type: boolean)
+ Select Operator
+ expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean), cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
+ ListSink
+
+PREHOOK: query: create table llap_temp_table as
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@orc_llap_n0
+PREHOOK: Output: database:default
+PREHOOK: Output: default@llap_temp_table
+POSTHOOK: query: create table llap_temp_table as
+select * from orc_llap_n0 where cint > 10 and cbigint is not null
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@orc_llap_n0
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean1 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean2 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cdecimal1 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cdecimal1, type:decimal(10,2), comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cdecimal2 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cdecimal2, type:decimal(38,5), comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cdouble SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cfloat SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp1 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp2 SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctinyint SIMPLE [(orc_llap_n0)orc_llap_n0.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select sum(hash(*)) from llap_temp_table
+PREHOOK: type: QUERY
+PREHOOK: Input: default@llap_temp_table
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from llap_temp_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@llap_temp_table
+#### A masked pattern was here ####
+212787774304
+PREHOOK: query: explain
+select * from orc_llap_n0 where cint > 10 and cint < 5000000
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from orc_llap_n0 where cint > 10 and cint < 5000000
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: orc_llap_n0
+ filterExpr: ((cint > 10) and (cint < 5000000)) (type: boolean)
+ Filter Operator
+ predicate: ((cint < 5000000) and (cint > 10)) (type: boolean)
+ Select Operator
+ expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean), cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
+ ListSink
+
+PREHOOK: query: select * from orc_llap_n0 where cint > 10 and cint < 5000000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_llap_n0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_llap_n0 where cint > 10 and cint < 5000000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_llap_n0
+#### A masked pattern was here ####
+-51 NULL 6981 707684071 -51.0 NULL YdG61y00526u5 G71l66F25 1969-12-31 16:00:08.451 NULL false true 3.35 5.56789
+-51 NULL 762 1587111633 -51.0 NULL q5y2Vy1 UbUx5 1969-12-31 16:00:08.451 NULL true false 3.35 5.56789
+-51 NULL 6981 -1569596201 -51.0 NULL o4lvY20511w0EOX3P3I82p63 J6YIW3yQlW3GydlRm 1969-12-31 16:00:08.451 NULL false true 3.35 5.56789
+-51 NULL 2949963 -1580871111 -51.0 NULL 0K68k3bdl7jO7 TPPAu 1969-12-31 16:00:08.451 NULL true false 3.35 5.56789
+-51 NULL 2089466 -240556350 -51.0 NULL cXX24dH7tblSj46j2g C31eea0wrHHqvj 1969-12-31 16:00:08.451 NULL true true 3.35 5.56789
+-51 NULL 6981 -471484665 -51.0 NULL 4KhrrQ0nJ7bMNTvhSCA R31tq72k1528DQ5C3Y4cNub 1969-12-31 16:00:08.451 NULL true false 3.35 5.56789
+-51 NULL 762 -755927849 -51.0 NULL a10E76jX35YwquKCTA s7473frMk58vm 1969-12-31 16:00:08.451 NULL true true 3.35 5.56789
+NULL 1016 3432650 1864027286 NULL 1016.0 0SPVSOVDI73t 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:12.364 false true 3.35 5.56789
+NULL 10144 4756105 1864027286 NULL 10144.0 bvoO6VwRmH6181mdOm87Do 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:12.134 true true 3.35 5.56789
+NULL 10653 3887593 1864027286 NULL 10653.0 2wak50xB5nHswbX 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:48.858 false true 3.35 5.56789
+NULL 10782 1286921 1864027286 NULL 10782.0 ODLrXI8882q8LS8 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:52.138 true true 3.35 5.56789
+NULL 197 762 1864027286 NULL 2563.58 3WsVeqb28VWEEOLI8ail 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:45.603 true true 3.35 5.56789
+NULL 1535 86028 1864027286 NULL 1535.0 T2o8XRFAL0HC4ikDQnfoCymw 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:54.662 true true 3.35 5.56789
+NULL 5064 504142 1864027286 NULL 5064.0 PlOxor04p5cvVl 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:09.828 true true 3.35 5.56789
+NULL -3799 1248059 1864027286 NULL -3799.0 Uhps6mMh3IfHB3j7yH62K 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:54.622 false true 3.35 5.56789
+NULL 10299 799471 1864027286 NULL 10299.0 2fu24 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:52.516 false true 3.35 5.56789
+NULL -8915 2101183 1864027286 NULL -8915.0 x7By66525 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:05.831 false true 3.35 5.56789
+8 NULL 2433892 -1611863517 8.0 NULL 674ILv3V2TxFqXP6wSbL VLprkK2XfX 1969-12-31 16:00:15.892 NULL false true 3.35 5.56789
+8 NULL 3073556 332961835 8.0 NULL rR855m18hps5nkaFqE43W pH15gLf8B4yNFDWFH74 1969-12-31 16:00:15.892 NULL true true 3.35 5.56789
+8 NULL 6981 627355276 8.0 NULL K630vaVf 7gDn3I45FGIX0J6JH74PCEN 1969-12-31 16:00:15.892 NULL false true 3.35 5.56789
+8 NULL 2229621 -381406148 8.0 NULL q7onkS7QRPh5ghOK oKb0bi 1969-12-31 16:00:15.892 NULL true false 3.35 5.56789
+NULL 359 6981 -1887561756 NULL 9763215.5639 sF2CRfgt2K 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:12.489 false false 3.35 5.56789
+NULL -12328 3253295 -1887561756 NULL -12328.0 Ut5NYg5XWb 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:57.985 true false 3.35 5.56789
+11 NULL 1000828 1531084669 11.0 NULL wM316f6NqGIkoP388j3F6 poWQQo3Upvt3Wh 1969-12-31 16:00:02.351 NULL false true 3.35 5.56789
+11 NULL 6981 -1908387379 11.0 NULL a3EhVU6Wuy7ycJ7wY7h2gv 0542kSCNs54o7tD6e2YuI3 1969-12-31 16:00:02.351 NULL true false 3.35 5.56789
+11 NULL 1310786 -413875656 11.0 NULL W0rvA4H1xn0xMG4uk0 8yVVjG 1969-12-31 16:00:02.351 NULL false true 3.35 5.56789
+11 NULL 6981 -667592125 11.0 NULL NULL xIVF2uu7 1969-12-31 16:00:02.351 NULL NULL true 3.35 5.56789
+11 NULL 3583612 -1172590956 11.0 NULL hrSdTD2Q05 mJ5nwN6o4s8Hi4 1969-12-31 16:00:02.351 NULL true true 3.35 5.56789
+11 NULL 6981 1532810435 11.0 NULL Y5x3JuI3M8jngv5N L760FuvYP 1969-12-31 16:00:02.351 NULL true true 3.35 5.56789
+11 NULL 4972984 -483828108 11.0 NULL Sf45K8ueb68jp6s8 jPWX6Wr4fmTBSc5HSlX1r 1969-12-31 16:00:02.351 NULL true false 3.35 5.56789
+11 NULL 762 -1005594359 11.0 NULL BLoMwUJ51ns6pd FtT7S 1969-12-31 16:00:02.351 NULL false false 3.35 5.56789
+NULL 359 762 -1645852809 NULL 9763215.5639 40ks5556SV xH7445Rals48VOulSyR5F NULL 1969-12-31 15:59:55.352 false false 3.35 5.56789
+NULL -75 6981 -1645852809 NULL -863.257 o5mb0QP5Y48Qd4vdB0 xH7445Rals48VOulSyR5F NULL 1969-12-31 15:59:44.062 true false 3.35 5.56789
+NULL -75 6981 -1645852809 NULL -863.257 1FNNhmiFLGw425NA13g xH7445Rals48VOulSyR5F NULL 1969-12-31 15:59:58.463 false false 3.35 5.56789
+NULL -13036 1288927 -1645852809 NULL -13036.0 yinBY725P7V2 xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:00.763 true false 3.35 5.56789
+-51 NULL 6981 707684071 -51.0 NULL YdG61y00526u5 G71l66F25 1969-12-31 16:00:08.451 NULL false true 3.35 5.56789
+-51 NULL 762 1587111633 -51.0 NULL q5y2Vy1 UbUx5 1969-12-31 16:00:08.451 NULL true false 3.35 5.56789
+-51 NULL 6981 -1569596201 -51.0 NULL o4lvY20511w0EOX3P3I82p63 J6YIW3yQlW3GydlRm 1969-12-31 16:00:08.451 NULL false true 3.35 5.56789
+-51 NULL 2949963 -1580871111 -51.0 NULL 0K68k3bdl7jO7 TPPAu 1969-12-31 16:00:08.451 NULL true false 3.35 5.56789
+-51 NULL 2089466 -240556350 -51.0 NULL cXX24dH7tblSj46j2g C31eea0wrHHqvj 1969-12-31 16:00:08.451 NULL true true 3.35 5.56789
+-51 NULL 6981 -471484665 -51.0 NULL 4KhrrQ0nJ7bMNTvhSCA R31tq72k1528DQ5C3Y4cNub 1969-12-31 16:00:08.451 NULL true false 3.35 5.56789
+-51 NULL 762 -755927849 -51.0 NULL a10E76jX35YwquKCTA s7473frMk58vm 1969-12-31 16:00:08.451 NULL true true 3.35 5.56789
+NULL 1016 3432650 1864027286 NULL 1016.0 0SPVSOVDI73t 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:12.364 false true 3.35 5.56789
+NULL 10144 4756105 1864027286 NULL 10144.0 bvoO6VwRmH6181mdOm87Do 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:12.134 true true 3.35 5.56789
+NULL 10653 3887593 1864027286 NULL 10653.0 2wak50xB5nHswbX 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:48.858 false true 3.35 5.56789
+NULL 10782 1286921 1864027286 NULL 10782.0 ODLrXI8882q8LS8 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:52.138 true true 3.35 5.56789
+NULL 197 762 1864027286 NULL 2563.58 3WsVeqb28VWEEOLI8ail 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:45.603 true true 3.35 5.56789
+NULL 1535 86028 1864027286 NULL 1535.0 T2o8XRFAL0HC4ikDQnfoCymw 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:54.662 true true 3.35 5.56789
+NULL 5064 504142 1864027286 NULL 5064.0 PlOxor04p5cvVl 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:09.828 true true 3.35 5.56789
+NULL -3799 1248059 1864027286 NULL -3799.0 Uhps6mMh3IfHB3j7yH62K 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:54.622 false true 3.35 5.56789
+NULL 10299 799471 1864027286 NULL 10299.0 2fu24 4KWs6gw7lv2WYd66P NULL 1969-12-31 15:59:52.516 false true 3.35 5.56789
+NULL -8915 2101183 1864027286 NULL -8915.0 x7By66525 4KWs6gw7lv2WYd66P NULL 1969-12-31 16:00:05.831 false true 3.35 5.56789
+8 NULL 2433892 -1611863517 8.0 NULL 674ILv3V2TxFqXP6wSbL VLprkK2XfX 1969-12-31 16:00:15.892 NULL false true 3.35 5.56789
+8 NULL 3073556 332961835 8.0 NULL rR855m18hps5nkaFqE43W pH15gLf8B4yNFDWFH74 1969-12-31 16:00:15.892 NULL true true 3.35 5.56789
+8 NULL 6981 627355276 8.0 NULL K630vaVf 7gDn3I45FGIX0J6JH74PCEN 1969-12-31 16:00:15.892 NULL false true 3.35 5.56789
+8 NULL 2229621 -381406148 8.0 NULL q7onkS7QRPh5ghOK oKb0bi 1969-12-31 16:00:15.892 NULL true false 3.35 5.56789
+NULL 359 6981 -1887561756 NULL 9763215.5639 sF2CRfgt2K 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 16:00:12.489 false false 3.35 5.56789
+NULL -12328 3253295 -1887561756 NULL -12328.0 Ut5NYg5XWb 4hA4KQj2vD3fI6gX82220d NULL 1969-12-31 15:59:57.985 true false 3.35 5.56789
+11 NULL 1000828 1531084669 11.0 NULL wM316f6NqGIkoP388j3F6 poWQQo3Upvt3Wh 1969-12-31 16:00:02.351 NULL false true 3.35 5.56789
+11 NULL 6981 -1908387379 11.0 NULL a3EhVU6Wuy7ycJ7wY7h2gv 0542kSCNs54o7tD6e2YuI3 1969-12-31 16:00:02.351 NULL true false 3.35 5.56789
+11 NULL 1310786 -413875656 11.0 NULL W0rvA4H1xn0xMG4uk0 8yVVjG 1969-12-31 16:00:02.351 NULL false true 3.35 5.56789
+11 NULL 6981 -667592125 11.0 NULL NULL xIVF2uu7 1969-12-31 16:00:02.351 NULL NULL true 3.35 5.56789
+11 NULL 3583612 -1172590956 11.0 NULL hrSdTD2Q05 mJ5nwN6o4s8Hi4 1969-12-31 16:00:02.351 NULL true true 3.35 5.56789
+11 NULL 6981 1532810435 11.0 NULL Y5x3JuI3M8jngv5N L760FuvYP 1969-12-31 16:00:02.351 NULL true true 3.35 5.56789
+11 NULL 4972984 -483828108 11.0 NULL Sf45K8ueb68jp6s8 jPWX6Wr4fmTBSc5HSlX1r 1969-12-31 16:00:02.351 NULL true false 3.35 5.56789
+11 NULL 762 -1005594359 11.0 NULL BLoMwUJ51ns6pd FtT7S 1969-12-31 16:00:02.351 NULL false false 3.35 5.56789
+NULL 359 762 -1645852809 NULL 9763215.5639 40ks5556SV xH7445Rals48VOulSyR5F NULL 1969-12-31 15:59:55.352 false false 3.35 5.56789
+NULL -75 6981 -1645852809 NULL -863.257 o5mb0QP5Y48Qd4vdB0 xH7445Rals48VOulSyR5F NULL 1969-12-31 15:59:44.062 true false 3.35 5.56789
+NULL -75 6981 -1645852809 NULL -863.257 1FNNhmiFLGw425NA13g xH7445Rals48VOulSyR5F NULL 1969-12-31 15:59:58.463 false false 3.35 5.56789
+NULL -13036 1288927 -1645852809 NULL -13036.0 yinBY725P7V2 xH7445Rals48VOulSyR5F NULL 1969-12-31 16:00:00.763 true false 3.35 5.56789
+PREHOOK: query: DROP TABLE orc_llap_n0
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_llap_n0
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: DROP TABLE orc_llap_n0
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_llap_n0
+POSTHOOK: Output: default@orc_llap_n0
+PREHOOK: query: drop table llap_temp_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@llap_temp_table
+PREHOOK: Output: default@llap_temp_table
+POSTHOOK: query: drop table llap_temp_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@llap_temp_table
+POSTHOOK: Output: default@llap_temp_table
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out b/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
index b361b1e..b3b2dcc 100644
--- a/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_vector_nohybridgrace.q.out
@@ -91,8 +91,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -133,8 +133,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -270,8 +270,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -312,8 +312,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out
index 5837963..9a70096 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create.q.out
@@ -50,7 +50,7 @@ Table Parameters:
numFiles 1
numRows 5
rawDataSize 1025
- totalSize 503
+ totalSize 501
#### A masked pattern was here ####
# Storage Information
@@ -111,7 +111,7 @@ Table Parameters:
numFiles 1
numRows 5
rawDataSize 580
- totalSize 348
+ totalSize 345
#### A masked pattern was here ####
# Storage Information
@@ -247,7 +247,7 @@ key value
numFiles 1
numRows 5
rawDataSize 1605
-totalSize 702
+totalSize 703
#### A masked pattern was here ####
PREHOOK: query: drop materialized view cmv_mat_view_n4
PREHOOK: type: DROP_MATERIALIZED_VIEW
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
index 10039cc..3d5acca 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
@@ -400,7 +400,7 @@ Table Type: MATERIALIZED_VIEW
Table Parameters:
bucketing_version 2
numFiles 2
- totalSize 1078
+ totalSize 1076
transactional true
transactional_properties default
#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out
index 7cf7132..85092a0 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_describe.q.out
@@ -73,7 +73,7 @@ Table Parameters:
numFiles 1
numRows 5
rawDataSize 580
- totalSize 348
+ totalSize 345
#### A masked pattern was here ####
# Storage Information
@@ -100,7 +100,7 @@ key foo
numFiles 1
numRows 5
rawDataSize 580
-totalSize 348
+totalSize 345
#### A masked pattern was here ####
PREHOOK: query: select a, c from cmv_mat_view_n8
PREHOOK: type: QUERY
@@ -242,7 +242,7 @@ Table Parameters:
numFiles 1
numRows 5
rawDataSize 1025
- totalSize 503
+ totalSize 501
#### A masked pattern was here ####
# Storage Information
[48/67] [abbrv] hive git commit: HIVE-19786: RpcServer cancelTask log
message is incorrect (Bharathkrishna Guruvayoor Murali,
reviewed by Sahil Takiar)
Posted by se...@apache.org.
HIVE-19786: RpcServer cancelTask log message is incorrect (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4810511d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4810511d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4810511d
Branch: refs/heads/master-txnstats
Commit: 4810511d6e2b4377b20d70122788d5ad300d8df1
Parents: 8c07676
Author: Bharathkrishna Guruvayoor Murali <bh...@cloudera.com>
Authored: Mon Jun 18 10:17:11 2018 -0500
Committer: Sahil Takiar <st...@cloudera.com>
Committed: Mon Jun 18 10:17:11 2018 -0500
----------------------------------------------------------------------
.../src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/4810511d/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
index f1383d6..babcb54 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcServer.java
@@ -101,7 +101,8 @@ public class RpcServer implements Closeable {
Runnable cancelTask = new Runnable() {
@Override
public void run() {
- LOG.warn("Timed out waiting for test message from Remote Spark driver.");
+ LOG.warn("Timed out waiting for the completion of SASL negotiation "
+ + "between HiveServer2 and the Remote Spark Driver.");
newRpc.close();
}
};
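For context, a minimal, self-contained sketch of the timeout pattern this log message belongs to. It assumes a plain ScheduledExecutorService and hypothetical class, method, and field names (SaslTimeoutExample, armTimeout); the actual RpcServer schedules its cancelTask through its own machinery, so this is only an illustration of arming a watchdog that logs the corrected SASL-negotiation message and closes the connection if negotiation never completes.

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical illustration only; not the actual RpcServer implementation.
class SaslTimeoutExample {
  private static final Logger LOG = LoggerFactory.getLogger(SaslTimeoutExample.class);
  private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

  /** Arms a watchdog that fires if SASL negotiation does not finish within timeoutMs. */
  ScheduledFuture<?> armTimeout(Closeable connection, long timeoutMs) {
    Runnable cancelTask = () -> {
      // Same corrected wording as the patch above: the wait is for SASL negotiation,
      // not for a "test message".
      LOG.warn("Timed out waiting for the completion of SASL negotiation "
          + "between HiveServer2 and the Remote Spark Driver.");
      try {
        connection.close();
      } catch (IOException e) {
        LOG.warn("Error closing connection after negotiation timeout.", e);
      }
    };
    return scheduler.schedule(cancelTask, timeoutMs, TimeUnit.MILLISECONDS);
  }
}

A caller that observes negotiation completing in time would simply cancel the returned future, so the warning is never logged.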
[47/67] [abbrv] hive git commit: HIVE-19787: Log message when
spark-submit has completed (Bharathkrishna Guruvayoor Murali,
reviewed by Sahil Takiar)
Posted by se...@apache.org.
HIVE-19787: Log message when spark-submit has completed (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c076762
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c076762
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c076762
Branch: refs/heads/master-txnstats
Commit: 8c0767625069418871194f418b99bce8cca1007b
Parents: c89cf6d
Author: Bharathkrishna Guruvayoor Murali <bh...@cloudera.com>
Authored: Mon Jun 18 10:12:10 2018 -0500
Committer: Sahil Takiar <st...@cloudera.com>
Committed: Mon Jun 18 10:12:10 2018 -0500
----------------------------------------------------------------------
.../java/org/apache/hive/spark/client/SparkSubmitSparkClient.java | 2 ++
1 file changed, 2 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/8c076762/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java b/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
index 1a524b9..31e89b8 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkSubmitSparkClient.java
@@ -211,6 +211,8 @@ class SparkSubmitSparkClient extends AbstractSparkClient {
LOG.warn("Child process exited with code {}", exitCode);
rpcServer.cancelClient(clientId,
"Child process (spark-submit) exited before connecting back with error log " + errStr.toString());
+ } else {
+ LOG.info("Child process (spark-submit) exited successfully.");
}
} catch (InterruptedException ie) {
LOG.warn("Thread waiting on the child process (spark-submit) is interrupted, killing the child process.");
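As a rough, hedged sketch of the pattern this patch completes (logging the successful exit as well as the failing one once the launcher child process terminates), using a plain java.lang.Process and a hypothetical class name; the real SparkSubmitSparkClient additionally cancels the pending client registration on failure, as the hunk above shows.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical illustration only; not the actual SparkSubmitSparkClient implementation.
class ChildProcessMonitorExample {
  private static final Logger LOG = LoggerFactory.getLogger(ChildProcessMonitorExample.class);

  /** Waits for the launcher child process and logs the outcome on both branches. */
  void waitForChild(Process child, String errStr) {
    try {
      int exitCode = child.waitFor();
      if (exitCode != 0) {
        LOG.warn("Child process exited with code {}", exitCode);
        LOG.warn("Collected error output: {}", errStr);
      } else {
        // The branch HIVE-19787 adds: make a clean spark-submit exit visible in the logs too.
        LOG.info("Child process (spark-submit) exited successfully.");
      }
    } catch (InterruptedException ie) {
      LOG.warn("Thread waiting on the child process (spark-submit) is interrupted, killing the child process.");
      child.destroyForcibly();
      Thread.currentThread().interrupt();
    }
  }
}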
[43/67] [abbrv] hive git commit: HIVE-19909: qtests: retire
hadoop_major version specific tests;
and logics (Zoltan Haindrich reviewed by Teddy Choi)
Posted by se...@apache.org.
HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)
Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ec256c2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ec256c2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ec256c2
Branch: refs/heads/master-txnstats
Commit: 4ec256c23d5986385f0ad4ff0ae43b72822b6756
Parents: ebd2c5f
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Mon Jun 18 10:35:12 2018 +0200
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Mon Jun 18 10:35:12 2018 +0200
----------------------------------------------------------------------
.../src/test/queries/negative/cascade_dbdrop.q | 1 -
.../queries/negative/cascade_dbdrop_hadoop20.q | 29 --
.../control/AbstractCoreBlobstoreCliDriver.java | 7 -
.../hive/cli/control/CoreAccumuloCliDriver.java | 5 -
.../hadoop/hive/cli/control/CoreCliDriver.java | 8 -
.../hive/cli/control/CoreCompareCliDriver.java | 7 +-
.../hive/cli/control/CoreHBaseCliDriver.java | 5 -
.../cli/control/CoreHBaseNegativeCliDriver.java | 5 -
.../hive/cli/control/CoreNegativeCliDriver.java | 7 +-
.../hive/cli/control/CorePerfCliDriver.java | 10 +-
.../org/apache/hadoop/hive/ql/QTestUtil.java | 110 +------
ql/src/test/queries/clientnegative/archive1.q | 1 -
ql/src/test/queries/clientnegative/archive2.q | 1 -
ql/src/test/queries/clientnegative/archive3.q | 1 -
ql/src/test/queries/clientnegative/archive4.q | 1 -
.../queries/clientnegative/archive_corrupt.q | 1 -
.../queries/clientnegative/archive_insert1.q | 1 -
.../queries/clientnegative/archive_insert2.q | 1 -
.../queries/clientnegative/archive_insert3.q | 1 -
.../queries/clientnegative/archive_insert4.q | 1 -
.../queries/clientnegative/archive_multi1.q | 1 -
.../queries/clientnegative/archive_multi2.q | 1 -
.../queries/clientnegative/archive_multi3.q | 1 -
.../queries/clientnegative/archive_multi4.q | 1 -
.../queries/clientnegative/archive_multi5.q | 1 -
.../queries/clientnegative/archive_multi6.q | 1 -
.../queries/clientnegative/archive_multi7.q | 1 -
.../queries/clientnegative/archive_partspec1.q | 1 -
.../queries/clientnegative/archive_partspec2.q | 1 -
.../queries/clientnegative/archive_partspec3.q | 1 -
.../queries/clientnegative/archive_partspec4.q | 1 -
.../queries/clientnegative/archive_partspec5.q | 1 -
ql/src/test/queries/clientnegative/autolocal1.q | 16 --
.../clientnegative/mapreduce_stack_trace.q | 1 -
.../mapreduce_stack_trace_turnoff.q | 1 -
.../alter_numbuckets_partitioned_table_h23.q | 1 -
.../test/queries/clientpositive/archive_multi.q | 1 -
.../test/queries/clientpositive/auto_join14.q | 1 -
.../clientpositive/auto_join14_hadoop20.q | 20 --
.../cbo_rp_udaf_percentile_approx_23.q | 1 -
ql/src/test/queries/clientpositive/combine2.q | 1 -
.../queries/clientpositive/combine2_hadoop20.q | 50 ----
ql/src/test/queries/clientpositive/ctas.q | 1 -
.../queries/clientpositive/groupby_sort_1.q | 283 ------------------
.../queries/clientpositive/groupby_sort_1_23.q | 1 -
.../clientpositive/groupby_sort_skew_1.q | 285 -------------------
.../clientpositive/groupby_sort_skew_1_23.q | 1 -
.../infer_bucket_sort_list_bucket.q | 1 -
ql/src/test/queries/clientpositive/input12.q | 1 -
.../queries/clientpositive/input12_hadoop20.q | 24 --
ql/src/test/queries/clientpositive/input39.q | 1 -
.../queries/clientpositive/input39_hadoop20.q | 31 --
ql/src/test/queries/clientpositive/join14.q | 1 -
.../queries/clientpositive/join14_hadoop20.q | 17 --
.../test/queries/clientpositive/lb_fs_stats.q | 1 -
.../queries/clientpositive/list_bucket_dml_1.q | 1 -
.../queries/clientpositive/list_bucket_dml_11.q | 1 -
.../queries/clientpositive/list_bucket_dml_12.q | 1 -
.../queries/clientpositive/list_bucket_dml_13.q | 1 -
.../queries/clientpositive/list_bucket_dml_14.q | 1 -
.../queries/clientpositive/list_bucket_dml_2.q | 1 -
.../queries/clientpositive/list_bucket_dml_3.q | 1 -
.../queries/clientpositive/list_bucket_dml_4.q | 1 -
.../queries/clientpositive/list_bucket_dml_5.q | 1 -
.../queries/clientpositive/list_bucket_dml_6.q | 1 -
.../queries/clientpositive/list_bucket_dml_7.q | 1 -
.../queries/clientpositive/list_bucket_dml_8.q | 1 -
.../queries/clientpositive/list_bucket_dml_9.q | 1 -
.../list_bucket_query_multiskew_1.q | 1 -
.../list_bucket_query_multiskew_2.q | 1 -
.../list_bucket_query_multiskew_3.q | 1 -
.../list_bucket_query_oneskew_1.q | 1 -
.../list_bucket_query_oneskew_2.q | 1 -
.../list_bucket_query_oneskew_3.q | 1 -
.../test/queries/clientpositive/loadpart_err.q | 21 --
.../test/queries/clientpositive/recursive_dir.q | 1 -
ql/src/test/queries/clientpositive/sample10.q | 1 -
.../clientpositive/sample_islocalmode_hook.q | 1 -
.../sample_islocalmode_hook_hadoop20.q | 42 ---
.../sample_islocalmode_hook_use_metadata.q | 1 -
.../clientpositive/skewjoin_union_remove_1.q | 1 -
.../clientpositive/skewjoin_union_remove_2.q | 1 -
.../queries/clientpositive/stats_list_bucket.q | 1 -
.../truncate_column_list_bucket.q | 1 -
.../test/queries/clientpositive/uber_reduce.q | 1 -
.../clientpositive/udaf_percentile_approx_20.q | 87 ------
.../clientpositive/udaf_percentile_approx_23.q | 1 -
.../queries/clientpositive/union_remove_1.q | 1 -
.../queries/clientpositive/union_remove_10.q | 1 -
.../queries/clientpositive/union_remove_11.q | 1 -
.../queries/clientpositive/union_remove_12.q | 1 -
.../queries/clientpositive/union_remove_13.q | 1 -
.../queries/clientpositive/union_remove_14.q | 1 -
.../queries/clientpositive/union_remove_15.q | 1 -
.../queries/clientpositive/union_remove_16.q | 1 -
.../queries/clientpositive/union_remove_17.q | 1 -
.../queries/clientpositive/union_remove_18.q | 1 -
.../queries/clientpositive/union_remove_19.q | 1 -
.../queries/clientpositive/union_remove_2.q | 1 -
.../queries/clientpositive/union_remove_20.q | 1 -
.../queries/clientpositive/union_remove_21.q | 1 -
.../queries/clientpositive/union_remove_22.q | 1 -
.../queries/clientpositive/union_remove_23.q | 1 -
.../queries/clientpositive/union_remove_24.q | 1 -
.../queries/clientpositive/union_remove_25.q | 1 -
.../queries/clientpositive/union_remove_3.q | 1 -
.../queries/clientpositive/union_remove_4.q | 1 -
.../queries/clientpositive/union_remove_5.q | 1 -
.../queries/clientpositive/union_remove_7.q | 1 -
.../queries/clientpositive/union_remove_8.q | 1 -
.../queries/clientpositive/union_remove_9.q | 1 -
111 files changed, 15 insertions(+), 1144 deletions(-)
----------------------------------------------------------------------
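The gating mechanism retired here, visible in the QTestUtil hunks below, scanned each .q file for a "-- EXCLUDE_HADOOP_MAJOR_VERSIONS(...)" or "-- INCLUDE_HADOOP_MAJOR_VERSIONS(...)" comment and decided whether to skip the test for the running Hadoop major version. The following is a hedged, self-contained sketch of that marker check with hypothetical class and method names; the removed code lived in QTestUtil.checkHadoopVersionExclude and may have differed in detail.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical standalone illustration of the retired marker check; not the removed QTestUtil code verbatim.
class HadoopVersionGateExample {
  // Same marker syntax the removed checkHadoopVersionExclude() scanned for.
  private static final Pattern VERSION_MARKER =
      Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");

  /** Returns true if the query text says the test should be skipped on this Hadoop major version. */
  static boolean shouldSkip(String query, String hadoopMajorVersion) {
    Matcher m = VERSION_MARKER.matcher(query);
    if (!m.find()) {
      return false; // no marker: the test runs on every version
    }
    boolean exclude = "EX".equals(m.group(1));
    boolean listed = false;
    for (String v : m.group(2).split(",")) {
      if (v.trim().equals(hadoopMajorVersion)) {
        listed = true;
        break;
      }
    }
    // EXCLUDE skips the listed versions; INCLUDE runs only on the listed versions.
    return exclude ? listed : !listed;
  }
}

With HIVE-19909 no such gate is needed any more, so the skip set and the marker scan are removed outright in the hunks that follow.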
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/cascade_dbdrop.q b/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
index 7f9df5e..266aa06 100644
--- a/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
+++ b/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
@@ -1,7 +1,6 @@
CREATE DATABASE hbaseDB;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-- Hadoop 0.23 changes the behavior FsShell on Exit Codes
-- In Hadoop 0.20
-- Exit Code == 0 on success
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q b/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q
deleted file mode 100644
index 8fa8c8a..0000000
--- a/hbase-handler/src/test/queries/negative/cascade_dbdrop_hadoop20.q
+++ /dev/null
@@ -1,29 +0,0 @@
-
-CREATE DATABASE hbaseDB;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
--- Hadoop 0.23 changes the behavior FsShell on Exit Codes
--- In Hadoop 0.20
--- Exit Code == 0 on success
--- Exit code < 0 on any failure
--- In Hadoop 0.23
--- Exit Code == 0 on success
--- Exit Code < 0 on syntax/usage error
--- Exit Code > 0 operation failed
-
-CREATE TABLE hbaseDB.hbase_table_0(key int, value string)
-STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
-
-dfs -ls target/tmp/hbase/data/default/hbase_table_0;
-
-DROP DATABASE IF EXISTS hbaseDB CASCADE;
-
-dfs -ls target/tmp/hbase/data/hbase/default/hbase_table_0;
-
-
-
-
-
-
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
index dd80424..764a4d8 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/AbstractCoreBlobstoreCliDriver.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hive.cli.control;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import com.google.common.base.Strings;
-
import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Calendar;
@@ -133,11 +131,6 @@ public abstract class AbstractCoreBlobstoreCliDriver extends CliAdapter {
System.err.println("Begin query: " + fname);
qt.addFile(fpath);
-
- if (qt.shouldBeSkipped(fname)) {
- System.err.println("Test " + fname + " skipped");
- return;
- }
qt.cliInit(new File(fpath), false);
int ecode = qt.executeClient(fname);
if ((ecode == 0) ^ expectSuccess) {
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
index 9c9ba18..648a05d 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
@@ -90,11 +90,6 @@ public class CoreAccumuloCliDriver extends CliAdapter {
qt.addFile(fpath);
- if (qt.shouldBeSkipped(fname)) {
- System.err.println("Test " + fname + " skipped");
- return;
- }
-
qt.cliInit(new File(fpath), false);
qt.clearTestSideEffects();
int ecode = qt.executeClient(fname);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
index a7ec4f3..e588592 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCliDriver.java
@@ -165,14 +165,6 @@ public class CoreCliDriver extends CliAdapter {
System.err.println("Begin query: " + fname);
qt.addFile(fpath);
-
- if (qt.shouldBeSkipped(fname)) {
- LOG.info("Test " + fname + " skipped");
- System.err.println("Test " + fname + " skipped");
- skipped = true;
- return;
- }
-
qt.cliInit(new File(fpath), false);
int ecode = qt.executeClient(fname);
if (ecode != 0) {
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
index c36d231..1ad76f9 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreCompareCliDriver.java
@@ -25,7 +25,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import com.google.common.base.Strings;
import org.apache.hadoop.hive.ql.QTestProcessExecResult;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
@@ -33,6 +32,8 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
+
+import com.google.common.base.Strings;
public class CoreCompareCliDriver extends CliAdapter{
private static QTestUtil qt;
@@ -128,10 +129,6 @@ public class CoreCompareCliDriver extends CliAdapter{
qt.addFile(new File(queryDirectory, versionFile), true);
}
- if (qt.shouldBeSkipped(fname)) {
- return;
- }
-
int ecode = 0;
qt.cliInit(new File(fpath), false);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
index b40b8d7..fc5f75d 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseCliDriver.java
@@ -110,11 +110,6 @@ public class CoreHBaseCliDriver extends CliAdapter {
qt.addFile(fpath);
- if (qt.shouldBeSkipped(fname)) {
- System.err.println("Test " + fname + " skipped");
- return;
- }
-
qt.cliInit(new File(fpath), false);
int ecode = qt.executeClient(fname);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
index e828dc7..8fb88d0 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreHBaseNegativeCliDriver.java
@@ -92,11 +92,6 @@ public class CoreHBaseNegativeCliDriver extends CliAdapter {
qt.addFile(fpath);
- if (qt.shouldBeSkipped(fname)) {
- System.err.println("Test " + fname + " skipped");
- return;
- }
-
qt.cliInit(new File(fpath));
qt.clearTestSideEffects();
int ecode = qt.executeClient(fname);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
index 176ac14..3be6f66 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreNegativeCliDriver.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.fail;
import java.io.File;
-import com.google.common.base.Strings;
import org.apache.hadoop.hive.ql.QTestProcessExecResult;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
@@ -30,6 +29,8 @@ import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
+import com.google.common.base.Strings;
+
public class CoreNegativeCliDriver extends CliAdapter{
private QTestUtil qt;
@@ -116,10 +117,6 @@ public class CoreNegativeCliDriver extends CliAdapter{
qt.addFile(fpath);
- if (qt.shouldBeSkipped(fname)) {
- System.err.println("Test " + fname + " skipped");
- return;
- }
qt.cliInit(new File(fpath), false);
int ecode = qt.executeClient(fname);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
index 3ae691f..af91866 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CorePerfCliDriver.java
@@ -25,13 +25,14 @@ import static org.junit.Assert.fail;
import java.io.File;
-import com.google.common.base.Strings;
+import org.apache.hadoop.hive.ql.MetaStoreDumpUtility;
import org.apache.hadoop.hive.ql.QTestProcessExecResult;
import org.apache.hadoop.hive.ql.QTestUtil;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
-import org.apache.hadoop.hive.ql.MetaStoreDumpUtility;
import org.junit.After;
import org.junit.AfterClass;
+
+import com.google.common.base.Strings;
/**
This is the TestPerformance Cli Driver for integrating performance regression tests
as part of the Hive Unit tests.
@@ -125,11 +126,6 @@ public class CorePerfCliDriver extends CliAdapter{
System.err.println("Begin query: " + fname);
qt.addFile(fpath);
-
- if (qt.shouldBeSkipped(fname)) {
- return;
- }
-
qt.cliInit(new File(fpath), false);
int ecode = qt.executeClient(fname);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index f19a3ad..2106fec 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -54,7 +54,6 @@ import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import junit.framework.TestSuite;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.ByteArrayOutputStream;
@@ -80,6 +79,9 @@ import org.apache.hadoop.hive.llap.io.api.LlapProxy;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
+import org.apache.hadoop.hive.ql.dataset.Dataset;
+import org.apache.hadoop.hive.ql.dataset.DatasetCollection;
+import org.apache.hadoop.hive.ql.dataset.DatasetParser;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -105,9 +107,6 @@ import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.apache.hadoop.hive.ql.processors.HiveCommand;
import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.dataset.DatasetCollection;
-import org.apache.hadoop.hive.ql.dataset.DatasetParser;
-import org.apache.hadoop.hive.ql.dataset.Dataset;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.HadoopShims.HdfsErasureCodingShim;
import org.apache.hadoop.hive.shims.ShimLoader;
@@ -119,14 +118,15 @@ import org.apache.tools.ant.BuildException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import junit.framework.TestSuite;
/**
* QTestUtil.
@@ -164,7 +164,6 @@ public class QTestUtil {
protected String overrideResultsDir;
protected final String logDir;
private final TreeMap<String, String> qMap;
- private final Set<String> qSkipSet;
private final Set<String> qSortSet;
private final Set<String> qSortQuerySet;
private final Set<String> qHashQuerySet;
@@ -173,7 +172,6 @@ public class QTestUtil {
private final Set<String> qMaskStatsQuerySet;
private final Set<String> qMaskDataSizeQuerySet;
private final Set<String> qMaskLineageQuerySet;
- private final Set<String> qJavaVersionSpecificOutput;
private static final String SORT_SUFFIX = ".sorted";
private static Set<String> srcTables;
private final Set<String> srcUDFs;
@@ -594,7 +592,6 @@ public class QTestUtil {
conf = queryState.getConf();
this.hadoopVer = getHadoopMainVersion(hadoopVer);
qMap = new TreeMap<String, String>();
- qSkipSet = new HashSet<String>();
qSortSet = new HashSet<String>();
qSortQuerySet = new HashSet<String>();
qHashQuerySet = new HashSet<String>();
@@ -603,7 +600,6 @@ public class QTestUtil {
qMaskStatsQuerySet = new HashSet<String>();
qMaskDataSizeQuerySet = new HashSet<String>();
qMaskLineageQuerySet = new HashSet<String>();
- qJavaVersionSpecificOutput = new HashSet<String>();
this.clusterType = clusterType;
HadoopShims shims = ShimLoader.getHadoopShims();
@@ -839,14 +835,6 @@ public class QTestUtil {
return;
}
- if(checkHadoopVersionExclude(qf.getName(), query)) {
- qSkipSet.add(qf.getName());
- }
-
- if (checkNeedJavaSpecificOutput(qf.getName(), query)) {
- qJavaVersionSpecificOutput.add(qf.getName());
- }
-
if (matches(SORT_BEFORE_DIFF, query)) {
qSortSet.add(qf.getName());
} else if (matches(SORT_QUERY_RESULTS, query)) {
@@ -888,79 +876,6 @@ public class QTestUtil {
return false;
}
- private boolean checkHadoopVersionExclude(String fileName, String query){
-
- // Look for a hint to not run a test on some Hadoop versions
- Pattern pattern = Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");
-
- boolean excludeQuery = false;
- boolean includeQuery = false;
- Set<String> versionSet = new HashSet<String>();
- String hadoopVer = ShimLoader.getMajorVersion();
-
- Matcher matcher = pattern.matcher(query);
-
- // Each qfile may include at most one INCLUDE or EXCLUDE directive.
- //
- // If a qfile contains an INCLUDE directive, and hadoopVer does
- // not appear in the list of versions to include, then the qfile
- // is skipped.
- //
- // If a qfile contains an EXCLUDE directive, and hadoopVer is
- // listed in the list of versions to EXCLUDE, then the qfile is
- // skipped.
- //
- // Otherwise, the qfile is included.
-
- if (matcher.find()) {
-
- String prefix = matcher.group(1);
- if ("EX".equals(prefix)) {
- excludeQuery = true;
- } else {
- includeQuery = true;
- }
-
- String versions = matcher.group(2);
- for (String s : versions.split("\\,")) {
- s = s.trim();
- versionSet.add(s);
- }
- }
-
- if (matcher.find()) {
- //2nd match is not supposed to be there
- String message = "QTestUtil: qfile " + fileName
- + " contains more than one reference to (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS";
- throw new UnsupportedOperationException(message);
- }
-
- if (excludeQuery && versionSet.contains(hadoopVer)) {
- System.out.println("QTestUtil: " + fileName
- + " EXCLUDE list contains Hadoop Version " + hadoopVer + ". Skipping...");
- return true;
- } else if (includeQuery && !versionSet.contains(hadoopVer)) {
- System.out.println("QTestUtil: " + fileName
- + " INCLUDE list does not contain Hadoop Version " + hadoopVer + ". Skipping...");
- return true;
- }
- return false;
- }
-
- private boolean checkNeedJavaSpecificOutput(String fileName, String query) {
- Pattern pattern = Pattern.compile("-- JAVA_VERSION_SPECIFIC_OUTPUT");
- Matcher matcher = pattern.matcher(query);
- if (matcher.find()) {
- System.out.println("Test is flagged to generate Java version specific " +
- "output. Since we are using Java version " + javaVersion +
- ", we will generated Java " + javaVersion + " specific " +
- "output file for query file " + fileName);
- return true;
- }
-
- return false;
- }
-
/**
* Get formatted Java version to include minor version, but
* exclude patch level.
@@ -1613,17 +1528,8 @@ public class QTestUtil {
return commands;
}
- public boolean shouldBeSkipped(String tname) {
- return qSkipSet.contains(tname);
- }
-
private String getOutFileExtension(String fname) {
- String outFileExtension = ".out";
- if (qJavaVersionSpecificOutput.contains(fname)) {
- outFileExtension = ".java" + javaVersion + ".out";
- }
-
- return outFileExtension;
+ return ".out";
}
public void convertSequenceFileToTextFile() throws Exception {
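Every q-file change below deletes an "-- EXCLUDE_HADOOP_MAJOR_VERSIONS(...)" or "-- INCLUDE_HADOOP_MAJOR_VERSIONS(...)" header comment, which only the checkHadoopVersionExclude method removed above ever interpreted. As a compact, self-contained restatement of that interpretation (an illustration, not the Hive code; the class and method names are invented for the example), the rule was: with no directive the test always runs, EXCLUDE skips it when the running Hadoop major version is listed, and INCLUDE skips it when the version is not listed.

import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HadoopVersionDirectiveDemo {

  // Same directive shape the removed QTestUtil logic matched in a q-file header.
  private static final Pattern DIRECTIVE =
      Pattern.compile("-- (EX|IN)CLUDE_HADOOP_MAJOR_VERSIONS\\((.*)\\)");

  // Returns true when the query text says to skip the test for this Hadoop major version.
  // (The removed method additionally rejected a q-file carrying two directives.)
  static boolean shouldSkip(String query, String hadoopVer) {
    Matcher m = DIRECTIVE.matcher(query);
    if (!m.find()) {
      return false;                               // no directive: always run
    }
    boolean exclude = "EX".equals(m.group(1));
    Set<String> versions = new HashSet<>();
    for (String v : m.group(2).split(",")) {
      versions.add(v.trim());
    }
    // EXCLUDE skips listed versions; INCLUDE skips every version that is not listed.
    return exclude ? versions.contains(hadoopVer) : !versions.contains(hadoopVer);
  }

  public static void main(String[] args) {
    String q = "-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)\nSELECT 1;";
    System.out.println(shouldSkip(q, "0.18"));    // true  (listed in EXCLUDE)
    System.out.println(shouldSkip(q, "0.23"));    // false (not listed)
  }
}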
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive1.q b/ql/src/test/queries/clientnegative/archive1.q
index a66b5e2..6c11580 100644
--- a/ql/src/test/queries/clientnegative/archive1.q
+++ b/ql/src/test/queries/clientnegative/archive1.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE srcpart_archived LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive2.q b/ql/src/test/queries/clientnegative/archive2.q
index d879675..4bd0ef9 100644
--- a/ql/src/test/queries/clientnegative/archive2.q
+++ b/ql/src/test/queries/clientnegative/archive2.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to unarchive a non-archived partition
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
drop table tstsrcpart;
create table tstsrcpart like srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive3.q b/ql/src/test/queries/clientnegative/archive3.q
index c09243f..fb07a60 100644
--- a/ql/src/test/queries/clientnegative/archive3.q
+++ b/ql/src/test/queries/clientnegative/archive3.q
@@ -1,6 +1,5 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests archiving a table
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
ALTER TABLE srcpart ARCHIVE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive4.q b/ql/src/test/queries/clientnegative/archive4.q
index befdc5f..8921f6d 100644
--- a/ql/src/test/queries/clientnegative/archive4.q
+++ b/ql/src/test/queries/clientnegative/archive4.q
@@ -1,6 +1,5 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests archiving multiple partitions
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
ALTER TABLE srcpart ARCHIVE PARTITION (ds='2008-04-08', hr='12') PARTITION (ds='2008-04-08', hr='11');
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_corrupt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_corrupt.q b/ql/src/test/queries/clientnegative/archive_corrupt.q
index ab182d7..e5bda3f 100644
--- a/ql/src/test/queries/clientnegative/archive_corrupt.q
+++ b/ql/src/test/queries/clientnegative/archive_corrupt.q
@@ -8,7 +8,6 @@ drop table tstsrcpart;
create table tstsrcpart like srcpart;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
-- The version of GzipCodec that is provided in Hadoop 0.20 silently ignores
-- file format errors. However, versions of Hadoop that include
-- HADOOP-6835 (e.g. 0.23 and 1.x) cause a Wrong File Format exception
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert1.q b/ql/src/test/queries/clientnegative/archive_insert1.q
index 3663634..0b17464 100644
--- a/ql/src/test/queries/clientnegative/archive_insert1.q
+++ b/ql/src/test/queries/clientnegative/archive_insert1.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to insert into archived partition.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert2.q b/ql/src/test/queries/clientnegative/archive_insert2.q
index c4d99fe..eeb3e62 100644
--- a/ql/src/test/queries/clientnegative/archive_insert2.q
+++ b/ql/src/test/queries/clientnegative/archive_insert2.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to insert into archived partition.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert3.q b/ql/src/test/queries/clientnegative/archive_insert3.q
index 7a9f4fa..94ca892 100644
--- a/ql/src/test/queries/clientnegative/archive_insert3.q
+++ b/ql/src/test/queries/clientnegative/archive_insert3.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to create partition inside of archived directory.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_insert4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_insert4.q b/ql/src/test/queries/clientnegative/archive_insert4.q
index 52428f8..5d3ec6f 100644
--- a/ql/src/test/queries/clientnegative/archive_insert4.q
+++ b/ql/src/test/queries/clientnegative/archive_insert4.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to (possible) dynamic insert into archived partition.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi1.q b/ql/src/test/queries/clientnegative/archive_multi1.q
index bf60d5d..91366c2 100644
--- a/ql/src/test/queries/clientnegative/archive_multi1.q
+++ b/ql/src/test/queries/clientnegative/archive_multi1.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi2.q b/ql/src/test/queries/clientnegative/archive_multi2.q
index 92eff2f..9f342c9 100644
--- a/ql/src/test/queries/clientnegative/archive_multi2.q
+++ b/ql/src/test/queries/clientnegative/archive_multi2.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to unarchive a non-archived partition group
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
drop table tstsrcpart;
create table tstsrcpart like srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi3.q b/ql/src/test/queries/clientnegative/archive_multi3.q
index 29e4a00..c2e86ff 100644
--- a/ql/src/test/queries/clientnegative/archive_multi3.q
+++ b/ql/src/test/queries/clientnegative/archive_multi3.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive outer partition group containing other partition inside.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi4.q b/ql/src/test/queries/clientnegative/archive_multi4.q
index 98d766a..d5eb315 100644
--- a/ql/src/test/queries/clientnegative/archive_multi4.q
+++ b/ql/src/test/queries/clientnegative/archive_multi4.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive inner partition contained in archived partition group.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi5.q b/ql/src/test/queries/clientnegative/archive_multi5.q
index 1eeab17..73a684f 100644
--- a/ql/src/test/queries/clientnegative/archive_multi5.q
+++ b/ql/src/test/queries/clientnegative/archive_multi5.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to unarchive outer partition group containing other partition inside.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi6.q b/ql/src/test/queries/clientnegative/archive_multi6.q
index d335db9..5df07a3 100644
--- a/ql/src/test/queries/clientnegative/archive_multi6.q
+++ b/ql/src/test/queries/clientnegative/archive_multi6.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to unarchive inner partition contained in archived partition group.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_multi7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_multi7.q b/ql/src/test/queries/clientnegative/archive_multi7.q
index 4c3f06e..65e1025 100644
--- a/ql/src/test/queries/clientnegative/archive_multi7.q
+++ b/ql/src/test/queries/clientnegative/archive_multi7.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition group with custom locations.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE tstsrcpart LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec1.q b/ql/src/test/queries/clientnegative/archive_partspec1.q
index ead4268..9dd3e23 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec1.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec1.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE srcpart_archived LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec2.q b/ql/src/test/queries/clientnegative/archive_partspec2.q
index ff4581f..0e55217 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec2.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec2.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE srcpart_archived LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec3.q b/ql/src/test/queries/clientnegative/archive_partspec3.q
index ff29486..94f984c 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec3.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec3.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE srcpart_archived LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec4.q b/ql/src/test/queries/clientnegative/archive_partspec4.q
index f27496f..48c5ec6 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec4.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec4.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE srcpart_archived LIKE srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/archive_partspec5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/archive_partspec5.q b/ql/src/test/queries/clientnegative/archive_partspec5.q
index d5df078..a8441be 100644
--- a/ql/src/test/queries/clientnegative/archive_partspec5.q
+++ b/ql/src/test/queries/clientnegative/archive_partspec5.q
@@ -1,7 +1,6 @@
--! qt:dataset:srcpart
set hive.archive.enabled = true;
-- Tests trying to archive a partition twice.
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
CREATE TABLE srcpart_archived (key string, value string) partitioned by (ds string, hr int, min int);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/autolocal1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/autolocal1.q b/ql/src/test/queries/clientnegative/autolocal1.q
deleted file mode 100644
index 51f5bd5..0000000
--- a/ql/src/test/queries/clientnegative/autolocal1.q
+++ /dev/null
@@ -1,16 +0,0 @@
---! qt:dataset:src
-set mapred.job.tracker=abracadabra;
-set hive.exec.mode.local.auto.inputbytes.max=1;
-set hive.exec.mode.local.auto=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
--- hadoop0.23 changes the behavior of JobClient initialization
--- in hadoop0.20, JobClient initialization tries to get JobTracker's address
--- this throws the expected IllegalArgumentException
--- in hadoop0.23, JobClient initialization only initializes cluster
--- and get user group information
--- not attempts to get JobTracker's address
--- no IllegalArgumentException thrown in JobClient Initialization
--- an exception is thrown when JobClient submitJob
-
-SELECT key FROM src;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q b/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
index 37bb54d..953a7e4 100644
--- a/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
+++ b/ql/src/test/queries/clientnegative/mapreduce_stack_trace.q
@@ -7,7 +7,6 @@ set hive.exec.failure.hooks=org.apache.hadoop.hive.ql.hooks.VerifySessionStateSt
FROM src SELECT TRANSFORM(key, value) USING 'script_does_not_exist' AS (key, value);
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Hadoop 0.23 changes the getTaskDiagnostics behavior
-- The Error Code of hive failure MapReduce job changes
-- In Hadoop 0.20
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q b/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
index 946374b..d1b588b 100644
--- a/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
+++ b/ql/src/test/queries/clientnegative/mapreduce_stack_trace_turnoff.q
@@ -7,7 +7,6 @@ set hive.exec.failure.hooks=org.apache.hadoop.hive.ql.hooks.VerifySessionStateSt
FROM src SELECT TRANSFORM(key, value) USING 'script_does_not_exist' AS (key, value);
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Hadoop 0.23 changes the getTaskDiagnostics behavior
-- The Error Code of hive failure MapReduce job changes
-- In Hadoop 0.20
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
index 163ca8f..d4e1a19 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -1,5 +1,4 @@
--! qt:dataset:src
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
create table tst1_n1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
alter table tst1_n1 clustered by (key) into 8 buckets;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/archive_multi.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/archive_multi.q b/ql/src/test/queries/clientpositive/archive_multi.q
index 60cb4a9..b372ea2 100644
--- a/ql/src/test/queries/clientpositive/archive_multi.q
+++ b/ql/src/test/queries/clientpositive/archive_multi.q
@@ -23,7 +23,6 @@ select key, value from default.srcpart where ds='2008-04-09' and hr='11';
insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='12')
select key, value from default.srcpart where ds='2008-04-09' and hr='12';
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col
FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/auto_join14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join14.q b/ql/src/test/queries/clientpositive/auto_join14.q
index 11829cc..1f6e0eb 100644
--- a/ql/src/test/queries/clientpositive/auto_join14.q
+++ b/ql/src/test/queries/clientpositive/auto_join14.q
@@ -4,7 +4,6 @@ set hive.mapred.mode=nonstrict;
set hive.auto.convert.join = true;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
CREATE TABLE dest1_n83(c1 INT, c2 STRING) STORED AS TEXTFILE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q b/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
deleted file mode 100644
index 0c6b900..0000000
--- a/ql/src/test/queries/clientpositive/auto_join14_hadoop20.q
+++ /dev/null
@@ -1,20 +0,0 @@
---! qt:dataset:srcpart
---! qt:dataset:src
-
-set hive.auto.convert.join = true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-
-CREATE TABLE dest1_n74(c1 INT, c2 STRING) STORED AS TEXTFILE;
-
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
-explain
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n74 SELECT src.key, srcpart.value;
-
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n74 SELECT src.key, srcpart.value;
-
-SELECT sum(hash(dest1_n74.c1,dest1_n74.c2)) FROM dest1_n74;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
index ba198eb..d59bd24 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_udaf_percentile_approx_23.q
@@ -2,7 +2,6 @@ set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
set hive.cbo.returnpath.hiveop=true;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- 0.23 changed input order of data in reducer task, which affects result of percentile_approx
CREATE TABLE bucket_n1 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/combine2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/combine2.q b/ql/src/test/queries/clientpositive/combine2.q
index 5b19bc0..9d33c1a 100644
--- a/ql/src/test/queries/clientpositive/combine2.q
+++ b/ql/src/test/queries/clientpositive/combine2.q
@@ -17,7 +17,6 @@ set hive.merge.smallfiles.avgsize=0;
create table combine2_n0(key string) partitioned by (value string);
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
-- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
-- in an attempt to force the generation of multiple splits and multiple output files.
-- However, Hadoop 0.20 is incapable of generating splits smaller than the block size
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/combine2_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/combine2_hadoop20.q b/ql/src/test/queries/clientpositive/combine2_hadoop20.q
deleted file mode 100644
index 3f45ae5..0000000
--- a/ql/src/test/queries/clientpositive/combine2_hadoop20.q
+++ /dev/null
@@ -1,50 +0,0 @@
---! qt:dataset:srcpart
---! qt:dataset:src
-USE default;
-
-set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.min.split.size=256;
-set mapred.min.split.size.per.node=256;
-set mapred.min.split.size.per.rack=256;
-set mapred.max.split.size=256;
-set hive.exec.dynamic.partition=true;
-set hive.exec.dynamic.partition.mode=nonstrict;
-set mapred.cache.shared.enabled=false;
-set hive.merge.smallfiles.avgsize=0;
-
--- SORT_QUERY_RESULTS
-
-create table combine2(key string) partitioned by (value string);
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0
--- in an attempt to force the generation of multiple splits and multiple output files.
--- However, Hadoop 0.20 is incapable of generating splits smaller than the block size
--- when using CombineFileInputFormat, so only one split is generated. This has a
--- significant impact on the results of this test.
--- This issue was fixed in MAPREDUCE-2046 which is included in 0.22.
-
-insert overwrite table combine2 partition(value)
-select * from (
- select key, value from src where key < 10
- union all
- select key, '|' as value from src where key = 11
- union all
- select key, '2010-04-21 09:45:00' value from src where key = 19) s;
-
-show partitions combine2;
-
-explain
-select key, value from combine2 where value is not null;
-
-select key, value from combine2 where value is not null;
-
-explain extended
-select count(1) from combine2 where value is not null;
-
-select count(1) from combine2 where value is not null;
-
-explain
-select ds, count(1) from srcpart where ds is not null group by ds;
-
-select ds, count(1) from srcpart where ds is not null group by ds;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/ctas.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ctas.q b/ql/src/test/queries/clientpositive/ctas.q
index dbed475..c4fdda1 100644
--- a/ql/src/test/queries/clientpositive/ctas.q
+++ b/ql/src/test/queries/clientpositive/ctas.q
@@ -1,6 +1,5 @@
--! qt:dataset:src
set hive.explain.user=false;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
-- SORT_QUERY_RESULTS
create table nzhang_Tmp(a int, b string);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1.q b/ql/src/test/queries/clientpositive/groupby_sort_1.q
deleted file mode 100644
index 46ec0be..0000000
--- a/ql/src/test/queries/clientpositive/groupby_sort_1.q
+++ /dev/null
@@ -1,283 +0,0 @@
-;
-
-set hive.exec.reducers.max = 10;
-set hive.map.groupby.sorted=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- SORT_QUERY_RESULTS
-
-CREATE TABLE T1_n4(key STRING, val STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n4;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1_n4 select key, val from T1_n4;
-
-CREATE TABLE outputTbl1_n2(key int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key
--- matches the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T1_n4 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T1_n4 GROUP BY key;
-
-SELECT * FROM outputTbl1_n2;
-
-CREATE TABLE outputTbl2_n0(key1 int, key2 string, cnt int);
-
--- no map-side group by even if the group by key is a superset of sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2_n0
-SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val;
-
-INSERT OVERWRITE TABLE outputTbl2_n0
-SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val;
-
-SELECT * FROM outputTbl2_n0;
-
--- It should work for sub-queries
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key;
-
-SELECT * FROM outputTbl1_n2;
-
--- It should work for sub-queries with column aliases
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY k;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY k;
-
-SELECT * FROM outputTbl1_n2;
-
-CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant followed
--- by a match to the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key;
-
-INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key;
-
-SELECT * FROM outputTbl3;
-
-CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
-
--- no map-side group by if the group by key contains a constant followed by another column
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4;
-
--- no map-side group by if the group by key contains a function
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1;
-
-INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1;
-
-SELECT * FROM outputTbl3;
-
--- it should not matter what follows the group by
--- test various cases
-
--- group by followed by another group by
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-group by key + key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-group by key + key;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a union
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n4 GROUP BY key
- UNION ALL
-SELECT key, count(1) FROM T1_n4 GROUP BY key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n4 GROUP BY key
- UNION ALL
-SELECT key, count(1) FROM T1_n4 GROUP BY key
-) subq1;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a union where one of the sub-queries is map-side group by
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n4 GROUP BY key
- UNION ALL
-SELECT key + key as key, count(1) FROM T1_n4 GROUP BY key + key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT * FROM (
-SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key
- UNION ALL
-SELECT key + key as key, count(1) as cnt FROM T1_n4 GROUP BY key + key
-) subq1;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a join
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n4 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-SELECT * FROM outputTbl1_n2;
-
--- group by followed by a join where one of the sub-queries can be performed in the mapper
-EXPLAIN EXTENDED
-SELECT * FROM
-(SELECT key, count(1) FROM T1_n4 GROUP BY key) subq1
-JOIN
-(SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val) subq2
-ON subq1.key = subq2.key;
-
-CREATE TABLE T2_n3(key STRING, val STRING)
-CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T2_n3 select key, val from T1_n4;
-
--- no mapside sort group by if the group by is a prefix of the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T2_n3 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n2
-SELECT key, count(1) FROM T2_n3 GROUP BY key;
-
-SELECT * FROM outputTbl1_n2;
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2_n3 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T2_n3 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4;
-
-CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys followed by anything
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2_n3 GROUP BY key, 1, val, 2;
-
-INSERT OVERWRITE TABLE outputTbl5
-SELECT key, 1, val, 2, count(1) FROM T2_n3 GROUP BY key, 1, val, 2;
-
-SELECT * FROM outputTbl5;
-
--- constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2_n3)subq
-group by key, constant, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2_n3)subq
-group by key, constant, val;
-
-SELECT * FROM outputTbl4;
-
--- multiple levels of constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2_n3)subq
-)subq2
-group by key, constant3, val;
-
-INSERT OVERWRITE TABLE outputTbl4
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2_n3)subq
-)subq2
-group by key, constant3, val;
-
-SELECT * FROM outputTbl4;
-
-set hive.map.aggr=true;
-set hive.multigroupby.singlereducer=false;
-set mapred.reduce.tasks=31;
-
-CREATE TABLE DEST1_n7(key INT, cnt INT);
-CREATE TABLE DEST2_n1(key INT, val STRING, cnt INT);
-
-SET hive.exec.compress.intermediate=true;
-SET hive.exec.compress.output=true;
-
-EXPLAIN
-FROM T2_n3
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM T2_n3
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n7;
-select * from DEST2_n1;
-
--- multi-table insert with a sub-query
-EXPLAIN
-FROM (select key, val from T2_n3 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM (select key, val from T2_n3 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n7 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n1 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n7;
-select * from DEST2_n1;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
index b27aec4..c97fcdd 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
set hive.exec.reducers.max = 10;
set hive.map.groupby.sorted=true;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-- SORT_QUERY_RESULTS
CREATE TABLE T1_n80(key STRING, val STRING)
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
deleted file mode 100644
index 7836c4d..0000000
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
+++ /dev/null
@@ -1,285 +0,0 @@
-;
-
-set hive.exec.reducers.max = 10;
-set hive.map.groupby.sorted=true;
-set hive.groupby.skewindata=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
--- SORT_QUERY_RESULTS
-
-CREATE TABLE T1_n35(key STRING, val STRING)
-CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/000000_0' INTO TABLE T1_n35;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1_n35 select key, val from T1_n35;
-
-CREATE TABLE outputTbl1_n8(key int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key
--- matches the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T1_n35 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T1_n35 GROUP BY key;
-
-SELECT * FROM outputTbl1_n8;
-
-CREATE TABLE outputTbl2_n2(key1 int, key2 string, cnt int);
-
--- no map-side group by even if the group by key is a superset of sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2_n2
-SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val;
-
-INSERT OVERWRITE TABLE outputTbl2_n2
-SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val;
-
-SELECT * FROM outputTbl2_n2;
-
--- It should work for sub-queries
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM (SELECT key, val FROM T1_n35) subq1 GROUP BY key;
-
-SELECT * FROM outputTbl1_n8;
-
--- It should work for sub-queries with column aliases
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n35) subq1 GROUP BY k;
-
-SELECT * FROM outputTbl1_n8;
-
-CREATE TABLE outputTbl3_n0(key1 int, key2 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant followed
--- by a match to the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key;
-
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT 1, key, count(1) FROM T1_n35 GROUP BY 1, key;
-
-SELECT * FROM outputTbl3_n0;
-
-CREATE TABLE outputTbl4_n0(key1 int, key2 int, key3 string, cnt int);
-
--- no map-side group by if the group by key contains a constant followed by another column
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T1_n35 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4_n0;
-
--- no map-side group by if the group by key contains a function
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1;
-
-INSERT OVERWRITE TABLE outputTbl3_n0
-SELECT key, key + 1, count(1) FROM T1_n35 GROUP BY key, key + 1;
-
-SELECT * FROM outputTbl3_n0;
-
--- it should not matter what follows the group by
--- test various cases
-
--- group by followed by another group by
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-group by key + key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key + key, sum(cnt) from
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-group by key + key;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a union
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n35 GROUP BY key
- UNION ALL
-SELECT key, count(1) FROM T1_n35 GROUP BY key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n35 GROUP BY key
- UNION ALL
-SELECT key, count(1) FROM T1_n35 GROUP BY key
-) subq1;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a union where one of the sub-queries is map-side group by
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) FROM T1_n35 GROUP BY key
- UNION ALL
-SELECT key + key as key, count(1) FROM T1_n35 GROUP BY key + key
-) subq1;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT * FROM (
-SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key
- UNION ALL
-SELECT key + key as key, count(1) as cnt FROM T1_n35 GROUP BY key + key
-) subq1;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a join
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT subq1.key, subq1.cnt+subq2.cnt FROM
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq1
-JOIN
-(SELECT key, count(1) as cnt FROM T1_n35 GROUP BY key) subq2
-ON subq1.key = subq2.key;
-
-SELECT * FROM outputTbl1_n8;
-
--- group by followed by a join where one of the sub-queries can be performed in the mapper
-EXPLAIN EXTENDED
-SELECT * FROM
-(SELECT key, count(1) FROM T1_n35 GROUP BY key) subq1
-JOIN
-(SELECT key, val, count(1) FROM T1_n35 GROUP BY key, val) subq2
-ON subq1.key = subq2.key;
-
-CREATE TABLE T2_n23(key STRING, val STRING)
-CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-
--- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T2_n23 select key, val from T1_n35;
-
--- no mapside sort group by if the group by is a prefix of the sorted key
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T2_n23 GROUP BY key;
-
-INSERT OVERWRITE TABLE outputTbl1_n8
-SELECT key, count(1) FROM T2_n23 GROUP BY key;
-
-SELECT * FROM outputTbl1_n8;
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T2_n23 GROUP BY key, 1, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, 1, val, count(1) FROM T2_n23 GROUP BY key, 1, val;
-
-SELECT * FROM outputTbl4_n0;
-
-CREATE TABLE outputTbl5_n0(key1 int, key2 int, key3 string, key4 int, cnt int);
-
--- The plan should be converted to a map-side group by if the group by key contains a constant in between the
--- sorted keys followed by anything
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl5_n0
-SELECT key, 1, val, 2, count(1) FROM T2_n23 GROUP BY key, 1, val, 2;
-
-INSERT OVERWRITE TABLE outputTbl5_n0
-SELECT key, 1, val, 2, count(1) FROM T2_n23 GROUP BY key, 1, val, 2;
-
-SELECT * FROM outputTbl5_n0
-ORDER BY key1, key2, key3, key4;
-
--- constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2_n23)subq
-group by key, constant, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-SELECT key, constant, val, count(1) from
-(SELECT key, 1 as constant, val from T2_n23)subq
-group by key, constant, val;
-
-SELECT * FROM outputTbl4_n0;
-
--- multiple levels of constants from sub-queries should work fine
-EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl4_n0
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2_n23)subq
-)subq2
-group by key, constant3, val;
-
-INSERT OVERWRITE TABLE outputTbl4_n0
-select key, constant3, val, count(1) from
-(
-SELECT key, constant as constant2, val, 2 as constant3 from
-(SELECT key, 1 as constant, val from T2_n23)subq
-)subq2
-group by key, constant3, val;
-
-SELECT * FROM outputTbl4_n0;
-
-set hive.map.aggr=true;
-set hive.multigroupby.singlereducer=false;
-set mapred.reduce.tasks=31;
-
-CREATE TABLE DEST1_n30(key INT, cnt INT);
-CREATE TABLE DEST2_n6(key INT, val STRING, cnt INT);
-
-SET hive.exec.compress.intermediate=true;
-SET hive.exec.compress.output=true;
-
-EXPLAIN
-FROM T2_n23
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM T2_n23
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n30;
-select * from DEST2_n6;
-
--- multi-table insert with a sub-query
-EXPLAIN
-FROM (select key, val from T2_n23 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-FROM (select key, val from T2_n23 where key = 8) x
-INSERT OVERWRITE TABLE DEST1_n30 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE DEST2_n6 SELECT key, val, count(1) GROUP BY key, val;
-
-select * from DEST1_n30;
-select * from DEST2_n6;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
index 8919f3b..f5a2c59 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
@@ -3,7 +3,6 @@ set hive.exec.reducers.max = 10;
set hive.map.groupby.sorted=true;
set hive.groupby.skewindata=true;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-- SORT_QUERY_RESULTS
CREATE TABLE T1_n56(key STRING, val STRING)
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
index 5479468..c9fd712 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_list_bucket.q
@@ -6,7 +6,6 @@ set mapred.input.dir.recursive=true;
-- This tests that bucketing/sorting metadata is not inferred for tables with list bucketing
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- create a skewed table
CREATE TABLE list_bucketing_table (key STRING, value STRING)
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input12.q b/ql/src/test/queries/clientpositive/input12.q
index b75ce20..16698ed 100644
--- a/ql/src/test/queries/clientpositive/input12.q
+++ b/ql/src/test/queries/clientpositive/input12.q
@@ -4,7 +4,6 @@ set mapreduce.framework.name=yarn;
set mapreduce.jobtracker.address=localhost:58;
set hive.exec.mode.local.auto=true;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
CREATE TABLE dest1_n122(key INT, value STRING) STORED AS TEXTFILE;
CREATE TABLE dest2_n32(key INT, value STRING) STORED AS TEXTFILE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input12_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input12_hadoop20.q b/ql/src/test/queries/clientpositive/input12_hadoop20.q
deleted file mode 100644
index e9f2baf..0000000
--- a/ql/src/test/queries/clientpositive/input12_hadoop20.q
+++ /dev/null
@@ -1,24 +0,0 @@
---! qt:dataset:src
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
-
-CREATE TABLE dest1_n88(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2_n23(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest3_n2(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
-
-EXPLAIN
-FROM src
-INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
-
-FROM src
-INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
-
-SELECT dest1_n88.* FROM dest1_n88;
-SELECT dest2_n23.* FROM dest2_n23;
-SELECT dest3_n2.* FROM dest3_n2;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input39.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input39.q b/ql/src/test/queries/clientpositive/input39.q
index b757f8e..2efdbc6 100644
--- a/ql/src/test/queries/clientpositive/input39.q
+++ b/ql/src/test/queries/clientpositive/input39.q
@@ -1,5 +1,4 @@
--! qt:dataset:src
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
create table t1_n121(key string, value string) partitioned by (ds string);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/input39_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input39_hadoop20.q b/ql/src/test/queries/clientpositive/input39_hadoop20.q
deleted file mode 100644
index 26f2a6e..0000000
--- a/ql/src/test/queries/clientpositive/input39_hadoop20.q
+++ /dev/null
@@ -1,31 +0,0 @@
---! qt:dataset:src
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-
-create table t1_n77(key string, value string) partitioned by (ds string);
-create table t2_n46(key string, value string) partitioned by (ds string);
-
-insert overwrite table t1_n77 partition (ds='1')
-select key, value from src;
-
-insert overwrite table t1_n77 partition (ds='2')
-select key, value from src;
-
-insert overwrite table t2_n46 partition (ds='1')
-select key, value from src;
-
-set hive.test.mode=true;
-set hive.mapred.mode=strict;
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
-explain
-select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1';
-
-select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1';
-
-set hive.test.mode=false;
-set mapred.job.tracker;
-
-
-
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/join14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join14.q b/ql/src/test/queries/clientpositive/join14.q
index e0f725c..20b914e 100644
--- a/ql/src/test/queries/clientpositive/join14.q
+++ b/ql/src/test/queries/clientpositive/join14.q
@@ -2,7 +2,6 @@
--! qt:dataset:src
set hive.mapred.mode=nonstrict;
-- SORT_QUERY_RESULTS
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
CREATE TABLE dest1_n164(c1 INT, c2 STRING) STORED AS TEXTFILE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/join14_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join14_hadoop20.q b/ql/src/test/queries/clientpositive/join14_hadoop20.q
deleted file mode 100644
index 489ad0c..0000000
--- a/ql/src/test/queries/clientpositive/join14_hadoop20.q
+++ /dev/null
@@ -1,17 +0,0 @@
---! qt:dataset:srcpart
---! qt:dataset:src
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-
-CREATE TABLE dest1_n49(c1 INT, c2 STRING) STORED AS TEXTFILE;
-
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto=true;
-
-EXPLAIN
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value;
-
-FROM src JOIN srcpart ON src.key = srcpart.key AND srcpart.ds = '2008-04-08' and src.key > 100
-INSERT OVERWRITE TABLE dest1_n49 SELECT src.key, srcpart.value;
-
-select dest1_n49.* from dest1_n49;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/lb_fs_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lb_fs_stats.q b/ql/src/test/queries/clientpositive/lb_fs_stats.q
index 7cadaf9..3bc3924 100644
--- a/ql/src/test/queries/clientpositive/lb_fs_stats.q
+++ b/ql/src/test/queries/clientpositive/lb_fs_stats.q
@@ -7,7 +7,6 @@ set mapred.input.dir.recursive=true;
set hive.stats.dbclass=fs;
-- Tests truncating a column from a list bucketing table
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
CREATE TABLE test_tab_n0 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_1.q b/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
index 23e303f..40d5393 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_1.q
@@ -9,7 +9,6 @@ set mapred.input.dir.recursive=true;
-- list bucketing DML : dynamic partition and 2 stage query plan.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- create a skewed table
create table list_bucketing_dynamic_part_n0 (key String, value String)
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
index e0acf2a..7cac6d7 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
@@ -6,7 +6,6 @@ set hive.merge.mapredfiles=false;
-- Ensure it works if skewed column is not the first column in the table columns
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- list bucketing DML: static partition. multiple skewed columns.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
index d81355a..be02096 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
@@ -6,7 +6,6 @@ set hive.merge.mapredfiles=false;
-- Ensure it works if skewed column is not the first column in the table columns
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_13.q b/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
index 091cf0c..77b010d 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
@@ -6,7 +6,6 @@ set hive.merge.mapredfiles=false;
-- Ensure skewed value map has escaped directory name
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_14.q b/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
index a0f9c2c..f640853 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_14.q
@@ -8,7 +8,6 @@ set mapred.input.dir.recursive=true;
-- list bucketing DML : unpartitioned table and 2 stage query plan.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- create a skewed table
create table list_bucketing (key String, value String)
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
index b80e51d..6a46828 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
@@ -9,7 +9,6 @@ set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
set hive.stats.reliable=true;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- list bucketing DML: static partition. multiple skewed columns.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_3.q b/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
index 08c8ce2..0c1e43a 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_3.q
@@ -9,7 +9,6 @@ set mapred.input.dir.recursive=true;
-- list bucketing DML : static partition and 2 stage query plan.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- create a skewed table
create table list_bucketing_static_part_n1 (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
index a13915e..f14efe2 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
@@ -8,7 +8,6 @@ set mapred.input.dir.recursive=true;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- list bucketing DML: static partition. multiple skewed columns. merge.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
index bbfb317..996fa1d 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
@@ -9,7 +9,6 @@ set mapred.input.dir.recursive=true;
-- list bucketing DML: multiple skewed columns. 2 stages
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- create a skewed table
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
index b9a526b..e761c86 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
@@ -46,7 +46,6 @@ set hive.merge.mapredfiles=false;
-- with merge
-- 118 000002_0
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- create a skewed table
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_7.q b/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
index 2c96407..ce87e4f 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_7.q
@@ -9,7 +9,6 @@ set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
set hive.merge.rcfile.block.level=true;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- list bucketing DML : dynamic partition (one level) , merge , one skewed column
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
index 87f2624..ca19d79 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
@@ -48,7 +48,6 @@ set hive.merge.mapredfiles=false;
-- with merge
-- 118 000002_0
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- create a skewed table
create table list_bucketing_dynamic_part_n2 (key String, value String)
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
index e130f05..70ef57b 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
@@ -8,7 +8,6 @@ set mapred.input.dir.recursive=true;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- list bucketing DML: static partition. multiple skewed columns. merge.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
index 9e377d5..b8bf944 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_1.q
@@ -6,7 +6,6 @@ set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- List bucketing query logic test case. We simulate the directory structure by DML here.
-- Test condition:
[57/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
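The generated-code diff below (ThriftHiveMetastore.java) switches alter_partitions_with_environment_context from four positional arguments to a single AlterPartitionsRequest and has it return an AlterPartitionsResponse. A minimal caller-side sketch of the new shape follows; the Client class, method name, and request/response types are taken from the diff itself, while the individual setter names on the request (setDbName, setTableName, setPartitions, setEnvironmentContext) are assumptions made by analogy with the old positional arguments and are not confirmed by this patch.

import java.util.List;
import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.AlterPartitionsResponse;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

public class AlterPartitionsExample {
  // Sketch only: before this patch the client method took
  // (db_name, tbl_name, new_parts, environment_context) and returned void;
  // after it, the same call is made through one request object.
  static AlterPartitionsResponse alterPartitions(ThriftHiveMetastore.Client client,
      String dbName, String tblName, List<Partition> newParts, EnvironmentContext ctx)
      throws Exception {
    AlterPartitionsRequest req = new AlterPartitionsRequest();
    req.setDbName(dbName);            // assumed setter, mirrors old db_name argument
    req.setTableName(tblName);        // assumed setter, mirrors old tbl_name argument
    req.setPartitions(newParts);      // assumed setter, mirrors old new_parts argument
    req.setEnvironmentContext(ctx);   // assumed setter, mirrors old environment_context argument
    return client.alter_partitions_with_environment_context(req);
  }
}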
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 672ebf9..c6ce900 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -208,7 +208,7 @@ import org.slf4j.LoggerFactory;
public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ public AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
@@ -626,7 +626,7 @@ import org.slf4j.LoggerFactory;
public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void alter_partitions_with_environment_context(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -3401,33 +3401,33 @@ import org.slf4j.LoggerFactory;
return;
}
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+ public AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException
{
- send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
- recv_alter_partitions_with_environment_context();
+ send_alter_partitions_with_environment_context(req);
+ return recv_alter_partitions_with_environment_context();
}
- public void send_alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws org.apache.thrift.TException
+ public void send_alter_partitions_with_environment_context(AlterPartitionsRequest req) throws org.apache.thrift.TException
{
alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args();
- args.setDb_name(db_name);
- args.setTbl_name(tbl_name);
- args.setNew_parts(new_parts);
- args.setEnvironment_context(environment_context);
+ args.setReq(req);
sendBase("alter_partitions_with_environment_context", args);
}
- public void recv_alter_partitions_with_environment_context() throws InvalidOperationException, MetaException, org.apache.thrift.TException
+ public AlterPartitionsResponse recv_alter_partitions_with_environment_context() throws InvalidOperationException, MetaException, org.apache.thrift.TException
{
alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
receiveBase(result, "alter_partitions_with_environment_context");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
if (result.o1 != null) {
throw result.o1;
}
if (result.o2 != null) {
throw result.o2;
}
- return;
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result");
}
public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException
@@ -9869,44 +9869,35 @@ import org.slf4j.LoggerFactory;
}
}
- public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+ public void alter_partitions_with_environment_context(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
checkReady();
- alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(db_name, tbl_name, new_parts, environment_context, resultHandler, this, ___protocolFactory, ___transport);
+ alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(req, resultHandler, this, ___protocolFactory, ___transport);
this.___currentMethod = method_call;
___manager.call(method_call);
}
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_call extends org.apache.thrift.async.TAsyncMethodCall {
- private String db_name;
- private String tbl_name;
- private List<Partition> new_parts;
- private EnvironmentContext environment_context;
- public alter_partitions_with_environment_context_call(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ private AlterPartitionsRequest req;
+ public alter_partitions_with_environment_context_call(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
super(client, protocolFactory, transport, resultHandler, false);
- this.db_name = db_name;
- this.tbl_name = tbl_name;
- this.new_parts = new_parts;
- this.environment_context = environment_context;
+ this.req = req;
}
public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_with_environment_context", org.apache.thrift.protocol.TMessageType.CALL, 0));
alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args();
- args.setDb_name(db_name);
- args.setTbl_name(tbl_name);
- args.setNew_parts(new_parts);
- args.setEnvironment_context(environment_context);
+ args.setReq(req);
args.write(prot);
prot.writeMessageEnd();
}
- public void getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException {
+ public AlterPartitionsResponse getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
- (new Client(prot)).recv_alter_partitions_with_environment_context();
+ return (new Client(prot)).recv_alter_partitions_with_environment_context();
}
}
@@ -16414,7 +16405,7 @@ import org.slf4j.LoggerFactory;
public alter_partitions_with_environment_context_result getResult(I iface, alter_partitions_with_environment_context_args args) throws org.apache.thrift.TException {
alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
try {
- iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context);
+ result.success = iface.alter_partitions_with_environment_context(args.req);
} catch (InvalidOperationException o1) {
result.o1 = o1;
} catch (MetaException o2) {
@@ -24951,7 +24942,7 @@ import org.slf4j.LoggerFactory;
}
}
- @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_args, Void> {
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_args, AlterPartitionsResponse> {
public alter_partitions_with_environment_context() {
super("alter_partitions_with_environment_context");
}
@@ -24960,11 +24951,12 @@ import org.slf4j.LoggerFactory;
return new alter_partitions_with_environment_context_args();
}
- public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+ public AsyncMethodCallback<AlterPartitionsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
final org.apache.thrift.AsyncProcessFunction fcall = this;
- return new AsyncMethodCallback<Void>() {
- public void onComplete(Void o) {
+ return new AsyncMethodCallback<AlterPartitionsResponse>() {
+ public void onComplete(AlterPartitionsResponse o) {
alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
+ result.success = o;
try {
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
return;
@@ -25007,8 +24999,8 @@ import org.slf4j.LoggerFactory;
return false;
}
- public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
- iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context,resultHandler);
+ public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<AlterPartitionsResponse> resultHandler) throws TException {
+ iface.alter_partitions_with_environment_context(args.req,resultHandler);
}
}
@@ -42252,13 +42244,13 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
- struct.success = new ArrayList<String>(_list928.size);
- String _elem929;
- for (int _i930 = 0; _i930 < _list928.size; ++_i930)
+ org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
+ struct.success = new ArrayList<String>(_list936.size);
+ String _elem937;
+ for (int _i938 = 0; _i938 < _list936.size; ++_i938)
{
- _elem929 = iprot.readString();
- struct.success.add(_elem929);
+ _elem937 = iprot.readString();
+ struct.success.add(_elem937);
}
iprot.readListEnd();
}
@@ -42293,9 +42285,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
- for (String _iter931 : struct.success)
+ for (String _iter939 : struct.success)
{
- oprot.writeString(_iter931);
+ oprot.writeString(_iter939);
}
oprot.writeListEnd();
}
@@ -42334,9 +42326,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (String _iter932 : struct.success)
+ for (String _iter940 : struct.success)
{
- oprot.writeString(_iter932);
+ oprot.writeString(_iter940);
}
}
}
@@ -42351,13 +42343,13 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.success = new ArrayList<String>(_list933.size);
- String _elem934;
- for (int _i935 = 0; _i935 < _list933.size; ++_i935)
+ org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.success = new ArrayList<String>(_list941.size);
+ String _elem942;
+ for (int _i943 = 0; _i943 < _list941.size; ++_i943)
{
- _elem934 = iprot.readString();
- struct.success.add(_elem934);
+ _elem942 = iprot.readString();
+ struct.success.add(_elem942);
}
}
struct.setSuccessIsSet(true);
@@ -43011,13 +43003,13 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
- struct.success = new ArrayList<String>(_list936.size);
- String _elem937;
- for (int _i938 = 0; _i938 < _list936.size; ++_i938)
+ org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
+ struct.success = new ArrayList<String>(_list944.size);
+ String _elem945;
+ for (int _i946 = 0; _i946 < _list944.size; ++_i946)
{
- _elem937 = iprot.readString();
- struct.success.add(_elem937);
+ _elem945 = iprot.readString();
+ struct.success.add(_elem945);
}
iprot.readListEnd();
}
@@ -43052,9 +43044,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
- for (String _iter939 : struct.success)
+ for (String _iter947 : struct.success)
{
- oprot.writeString(_iter939);
+ oprot.writeString(_iter947);
}
oprot.writeListEnd();
}
@@ -43093,9 +43085,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (String _iter940 : struct.success)
+ for (String _iter948 : struct.success)
{
- oprot.writeString(_iter940);
+ oprot.writeString(_iter948);
}
}
}
@@ -43110,13 +43102,13 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.success = new ArrayList<String>(_list941.size);
- String _elem942;
- for (int _i943 = 0; _i943 < _list941.size; ++_i943)
+ org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.success = new ArrayList<String>(_list949.size);
+ String _elem950;
+ for (int _i951 = 0; _i951 < _list949.size; ++_i951)
{
- _elem942 = iprot.readString();
- struct.success.add(_elem942);
+ _elem950 = iprot.readString();
+ struct.success.add(_elem950);
}
}
struct.setSuccessIsSet(true);
@@ -47723,16 +47715,16 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
{
- org.apache.thrift.protocol.TMap _map944 = iprot.readMapBegin();
- struct.success = new HashMap<String,Type>(2*_map944.size);
- String _key945;
- Type _val946;
- for (int _i947 = 0; _i947 < _map944.size; ++_i947)
+ org.apache.thrift.protocol.TMap _map952 = iprot.readMapBegin();
+ struct.success = new HashMap<String,Type>(2*_map952.size);
+ String _key953;
+ Type _val954;
+ for (int _i955 = 0; _i955 < _map952.size; ++_i955)
{
- _key945 = iprot.readString();
- _val946 = new Type();
- _val946.read(iprot);
- struct.success.put(_key945, _val946);
+ _key953 = iprot.readString();
+ _val954 = new Type();
+ _val954.read(iprot);
+ struct.success.put(_key953, _val954);
}
iprot.readMapEnd();
}
@@ -47767,10 +47759,10 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (Map.Entry<String, Type> _iter948 : struct.success.entrySet())
+ for (Map.Entry<String, Type> _iter956 : struct.success.entrySet())
{
- oprot.writeString(_iter948.getKey());
- _iter948.getValue().write(oprot);
+ oprot.writeString(_iter956.getKey());
+ _iter956.getValue().write(oprot);
}
oprot.writeMapEnd();
}
@@ -47809,10 +47801,10 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (Map.Entry<String, Type> _iter949 : struct.success.entrySet())
+ for (Map.Entry<String, Type> _iter957 : struct.success.entrySet())
{
- oprot.writeString(_iter949.getKey());
- _iter949.getValue().write(oprot);
+ oprot.writeString(_iter957.getKey());
+ _iter957.getValue().write(oprot);
}
}
}
@@ -47827,16 +47819,16 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TMap _map950 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new HashMap<String,Type>(2*_map950.size);
- String _key951;
- Type _val952;
- for (int _i953 = 0; _i953 < _map950.size; ++_i953)
+ org.apache.thrift.protocol.TMap _map958 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.success = new HashMap<String,Type>(2*_map958.size);
+ String _key959;
+ Type _val960;
+ for (int _i961 = 0; _i961 < _map958.size; ++_i961)
{
- _key951 = iprot.readString();
- _val952 = new Type();
- _val952.read(iprot);
- struct.success.put(_key951, _val952);
+ _key959 = iprot.readString();
+ _val960 = new Type();
+ _val960.read(iprot);
+ struct.success.put(_key959, _val960);
}
}
struct.setSuccessIsSet(true);
@@ -48871,14 +48863,14 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list954 = iprot.readListBegin();
- struct.success = new ArrayList<FieldSchema>(_list954.size);
- FieldSchema _elem955;
- for (int _i956 = 0; _i956 < _list954.size; ++_i956)
+ org.apache.thrift.protocol.TList _list962 = iprot.readListBegin();
+ struct.success = new ArrayList<FieldSchema>(_list962.size);
+ FieldSchema _elem963;
+ for (int _i964 = 0; _i964 < _list962.size; ++_i964)
{
- _elem955 = new FieldSchema();
- _elem955.read(iprot);
- struct.success.add(_elem955);
+ _elem963 = new FieldSchema();
+ _elem963.read(iprot);
+ struct.success.add(_elem963);
}
iprot.readListEnd();
}
@@ -48931,9 +48923,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (FieldSchema _iter957 : struct.success)
+ for (FieldSchema _iter965 : struct.success)
{
- _iter957.write(oprot);
+ _iter965.write(oprot);
}
oprot.writeListEnd();
}
@@ -48988,9 +48980,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (FieldSchema _iter958 : struct.success)
+ for (FieldSchema _iter966 : struct.success)
{
- _iter958.write(oprot);
+ _iter966.write(oprot);
}
}
}
@@ -49011,14 +49003,14 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list959 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new ArrayList<FieldSchema>(_list959.size);
- FieldSchema _elem960;
- for (int _i961 = 0; _i961 < _list959.size; ++_i961)
+ org.apache.thrift.protocol.TList _list967 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.success = new ArrayList<FieldSchema>(_list967.size);
+ FieldSchema _elem968;
+ for (int _i969 = 0; _i969 < _list967.size; ++_i969)
{
- _elem960 = new FieldSchema();
- _elem960.read(iprot);
- struct.success.add(_elem960);
+ _elem968 = new FieldSchema();
+ _elem968.read(iprot);
+ struct.success.add(_elem968);
}
}
struct.setSuccessIsSet(true);
@@ -50172,14 +50164,14 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list962 = iprot.readListBegin();
- struct.success = new ArrayList<FieldSchema>(_list962.size);
- FieldSchema _elem963;
- for (int _i964 = 0; _i964 < _list962.size; ++_i964)
+ org.apache.thrift.protocol.TList _list970 = iprot.readListBegin();
+ struct.success = new ArrayList<FieldSchema>(_list970.size);
+ FieldSchema _elem971;
+ for (int _i972 = 0; _i972 < _list970.size; ++_i972)
{
- _elem963 = new FieldSchema();
- _elem963.read(iprot);
- struct.success.add(_elem963);
+ _elem971 = new FieldSchema();
+ _elem971.read(iprot);
+ struct.success.add(_elem971);
}
iprot.readListEnd();
}
@@ -50232,9 +50224,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (FieldSchema _iter965 : struct.success)
+ for (FieldSchema _iter973 : struct.success)
{
- _iter965.write(oprot);
+ _iter973.write(oprot);
}
oprot.writeListEnd();
}
@@ -50289,9 +50281,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (FieldSchema _iter966 : struct.success)
+ for (FieldSchema _iter974 : struct.success)
{
- _iter966.write(oprot);
+ _iter974.write(oprot);
}
}
}
@@ -50312,14 +50304,14 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list967 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new ArrayList<FieldSchema>(_list967.size);
- FieldSchema _elem968;
- for (int _i969 = 0; _i969 < _list967.size; ++_i969)
+ org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.success = new ArrayList<FieldSchema>(_list975.size);
+ FieldSchema _elem976;
+ for (int _i977 = 0; _i977 < _list975.size; ++_i977)
{
- _elem968 = new FieldSchema();
- _elem968.read(iprot);
- struct.success.add(_elem968);
+ _elem976 = new FieldSchema();
+ _elem976.read(iprot);
+ struct.success.add(_elem976);
}
}
struct.setSuccessIsSet(true);
@@ -51364,14 +51356,14 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list970 = iprot.readListBegin();
- struct.success = new ArrayList<FieldSchema>(_list970.size);
- FieldSchema _elem971;
- for (int _i972 = 0; _i972 < _list970.size; ++_i972)
+ org.apache.thrift.protocol.TList _list978 = iprot.readListBegin();
+ struct.success = new ArrayList<FieldSchema>(_list978.size);
+ FieldSchema _elem979;
+ for (int _i980 = 0; _i980 < _list978.size; ++_i980)
{
- _elem971 = new FieldSchema();
- _elem971.read(iprot);
- struct.success.add(_elem971);
+ _elem979 = new FieldSchema();
+ _elem979.read(iprot);
+ struct.success.add(_elem979);
}
iprot.readListEnd();
}
@@ -51424,9 +51416,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (FieldSchema _iter973 : struct.success)
+ for (FieldSchema _iter981 : struct.success)
{
- _iter973.write(oprot);
+ _iter981.write(oprot);
}
oprot.writeListEnd();
}
@@ -51481,9 +51473,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (FieldSchema _iter974 : struct.success)
+ for (FieldSchema _iter982 : struct.success)
{
- _iter974.write(oprot);
+ _iter982.write(oprot);
}
}
}
@@ -51504,14 +51496,14 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new ArrayList<FieldSchema>(_list975.size);
- FieldSchema _elem976;
- for (int _i977 = 0; _i977 < _list975.size; ++_i977)
+ org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.success = new ArrayList<FieldSchema>(_list983.size);
+ FieldSchema _elem984;
+ for (int _i985 = 0; _i985 < _list983.size; ++_i985)
{
- _elem976 = new FieldSchema();
- _elem976.read(iprot);
- struct.success.add(_elem976);
+ _elem984 = new FieldSchema();
+ _elem984.read(iprot);
+ struct.success.add(_elem984);
}
}
struct.setSuccessIsSet(true);
@@ -52665,14 +52657,14 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list978 = iprot.readListBegin();
- struct.success = new ArrayList<FieldSchema>(_list978.size);
- FieldSchema _elem979;
- for (int _i980 = 0; _i980 < _list978.size; ++_i980)
+ org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
+ struct.success = new ArrayList<FieldSchema>(_list986.size);
+ FieldSchema _elem987;
+ for (int _i988 = 0; _i988 < _list986.size; ++_i988)
{
- _elem979 = new FieldSchema();
- _elem979.read(iprot);
- struct.success.add(_elem979);
+ _elem987 = new FieldSchema();
+ _elem987.read(iprot);
+ struct.success.add(_elem987);
}
iprot.readListEnd();
}
@@ -52725,9 +52717,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (FieldSchema _iter981 : struct.success)
+ for (FieldSchema _iter989 : struct.success)
{
- _iter981.write(oprot);
+ _iter989.write(oprot);
}
oprot.writeListEnd();
}
@@ -52782,9 +52774,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (FieldSchema _iter982 : struct.success)
+ for (FieldSchema _iter990 : struct.success)
{
- _iter982.write(oprot);
+ _iter990.write(oprot);
}
}
}
@@ -52805,14 +52797,14 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new ArrayList<FieldSchema>(_list983.size);
- FieldSchema _elem984;
- for (int _i985 = 0; _i985 < _list983.size; ++_i985)
+ org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.success = new ArrayList<FieldSchema>(_list991.size);
+ FieldSchema _elem992;
+ for (int _i993 = 0; _i993 < _list991.size; ++_i993)
{
- _elem984 = new FieldSchema();
- _elem984.read(iprot);
- struct.success.add(_elem984);
+ _elem992 = new FieldSchema();
+ _elem992.read(iprot);
+ struct.success.add(_elem992);
}
}
struct.setSuccessIsSet(true);
@@ -55941,14 +55933,14 @@ import org.slf4j.LoggerFactory;
case 2: // PRIMARY_KEYS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
- struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list986.size);
- SQLPrimaryKey _elem987;
- for (int _i988 = 0; _i988 < _list986.size; ++_i988)
+ org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
+ struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list994.size);
+ SQLPrimaryKey _elem995;
+ for (int _i996 = 0; _i996 < _list994.size; ++_i996)
{
- _elem987 = new SQLPrimaryKey();
- _elem987.read(iprot);
- struct.primaryKeys.add(_elem987);
+ _elem995 = new SQLPrimaryKey();
+ _elem995.read(iprot);
+ struct.primaryKeys.add(_elem995);
}
iprot.readListEnd();
}
@@ -55960,14 +55952,14 @@ import org.slf4j.LoggerFactory;
case 3: // FOREIGN_KEYS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list989 = iprot.readListBegin();
- struct.foreignKeys = new ArrayList<SQLForeignKey>(_list989.size);
- SQLForeignKey _elem990;
- for (int _i991 = 0; _i991 < _list989.size; ++_i991)
+ org.apache.thrift.protocol.TList _list997 = iprot.readListBegin();
+ struct.foreignKeys = new ArrayList<SQLForeignKey>(_list997.size);
+ SQLForeignKey _elem998;
+ for (int _i999 = 0; _i999 < _list997.size; ++_i999)
{
- _elem990 = new SQLForeignKey();
- _elem990.read(iprot);
- struct.foreignKeys.add(_elem990);
+ _elem998 = new SQLForeignKey();
+ _elem998.read(iprot);
+ struct.foreignKeys.add(_elem998);
}
iprot.readListEnd();
}
@@ -55979,14 +55971,14 @@ import org.slf4j.LoggerFactory;
case 4: // UNIQUE_CONSTRAINTS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list992 = iprot.readListBegin();
- struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list992.size);
- SQLUniqueConstraint _elem993;
- for (int _i994 = 0; _i994 < _list992.size; ++_i994)
+ org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin();
+ struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1000.size);
+ SQLUniqueConstraint _elem1001;
+ for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002)
{
- _elem993 = new SQLUniqueConstraint();
- _elem993.read(iprot);
- struct.uniqueConstraints.add(_elem993);
+ _elem1001 = new SQLUniqueConstraint();
+ _elem1001.read(iprot);
+ struct.uniqueConstraints.add(_elem1001);
}
iprot.readListEnd();
}
@@ -55998,14 +55990,14 @@ import org.slf4j.LoggerFactory;
case 5: // NOT_NULL_CONSTRAINTS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list995 = iprot.readListBegin();
- struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list995.size);
- SQLNotNullConstraint _elem996;
- for (int _i997 = 0; _i997 < _list995.size; ++_i997)
+ org.apache.thrift.protocol.TList _list1003 = iprot.readListBegin();
+ struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1003.size);
+ SQLNotNullConstraint _elem1004;
+ for (int _i1005 = 0; _i1005 < _list1003.size; ++_i1005)
{
- _elem996 = new SQLNotNullConstraint();
- _elem996.read(iprot);
- struct.notNullConstraints.add(_elem996);
+ _elem1004 = new SQLNotNullConstraint();
+ _elem1004.read(iprot);
+ struct.notNullConstraints.add(_elem1004);
}
iprot.readListEnd();
}
@@ -56017,14 +56009,14 @@ import org.slf4j.LoggerFactory;
case 6: // DEFAULT_CONSTRAINTS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list998 = iprot.readListBegin();
- struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list998.size);
- SQLDefaultConstraint _elem999;
- for (int _i1000 = 0; _i1000 < _list998.size; ++_i1000)
+ org.apache.thrift.protocol.TList _list1006 = iprot.readListBegin();
+ struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1006.size);
+ SQLDefaultConstraint _elem1007;
+ for (int _i1008 = 0; _i1008 < _list1006.size; ++_i1008)
{
- _elem999 = new SQLDefaultConstraint();
- _elem999.read(iprot);
- struct.defaultConstraints.add(_elem999);
+ _elem1007 = new SQLDefaultConstraint();
+ _elem1007.read(iprot);
+ struct.defaultConstraints.add(_elem1007);
}
iprot.readListEnd();
}
@@ -56036,14 +56028,14 @@ import org.slf4j.LoggerFactory;
case 7: // CHECK_CONSTRAINTS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1001 = iprot.readListBegin();
- struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1001.size);
- SQLCheckConstraint _elem1002;
- for (int _i1003 = 0; _i1003 < _list1001.size; ++_i1003)
+ org.apache.thrift.protocol.TList _list1009 = iprot.readListBegin();
+ struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1009.size);
+ SQLCheckConstraint _elem1010;
+ for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011)
{
- _elem1002 = new SQLCheckConstraint();
- _elem1002.read(iprot);
- struct.checkConstraints.add(_elem1002);
+ _elem1010 = new SQLCheckConstraint();
+ _elem1010.read(iprot);
+ struct.checkConstraints.add(_elem1010);
}
iprot.readListEnd();
}
@@ -56074,9 +56066,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
- for (SQLPrimaryKey _iter1004 : struct.primaryKeys)
+ for (SQLPrimaryKey _iter1012 : struct.primaryKeys)
{
- _iter1004.write(oprot);
+ _iter1012.write(oprot);
}
oprot.writeListEnd();
}
@@ -56086,9 +56078,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
- for (SQLForeignKey _iter1005 : struct.foreignKeys)
+ for (SQLForeignKey _iter1013 : struct.foreignKeys)
{
- _iter1005.write(oprot);
+ _iter1013.write(oprot);
}
oprot.writeListEnd();
}
@@ -56098,9 +56090,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size()));
- for (SQLUniqueConstraint _iter1006 : struct.uniqueConstraints)
+ for (SQLUniqueConstraint _iter1014 : struct.uniqueConstraints)
{
- _iter1006.write(oprot);
+ _iter1014.write(oprot);
}
oprot.writeListEnd();
}
@@ -56110,9 +56102,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size()));
- for (SQLNotNullConstraint _iter1007 : struct.notNullConstraints)
+ for (SQLNotNullConstraint _iter1015 : struct.notNullConstraints)
{
- _iter1007.write(oprot);
+ _iter1015.write(oprot);
}
oprot.writeListEnd();
}
@@ -56122,9 +56114,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
- for (SQLDefaultConstraint _iter1008 : struct.defaultConstraints)
+ for (SQLDefaultConstraint _iter1016 : struct.defaultConstraints)
{
- _iter1008.write(oprot);
+ _iter1016.write(oprot);
}
oprot.writeListEnd();
}
@@ -56134,9 +56126,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size()));
- for (SQLCheckConstraint _iter1009 : struct.checkConstraints)
+ for (SQLCheckConstraint _iter1017 : struct.checkConstraints)
{
- _iter1009.write(oprot);
+ _iter1017.write(oprot);
}
oprot.writeListEnd();
}
@@ -56188,54 +56180,54 @@ import org.slf4j.LoggerFactory;
if (struct.isSetPrimaryKeys()) {
{
oprot.writeI32(struct.primaryKeys.size());
- for (SQLPrimaryKey _iter1010 : struct.primaryKeys)
+ for (SQLPrimaryKey _iter1018 : struct.primaryKeys)
{
- _iter1010.write(oprot);
+ _iter1018.write(oprot);
}
}
}
if (struct.isSetForeignKeys()) {
{
oprot.writeI32(struct.foreignKeys.size());
- for (SQLForeignKey _iter1011 : struct.foreignKeys)
+ for (SQLForeignKey _iter1019 : struct.foreignKeys)
{
- _iter1011.write(oprot);
+ _iter1019.write(oprot);
}
}
}
if (struct.isSetUniqueConstraints()) {
{
oprot.writeI32(struct.uniqueConstraints.size());
- for (SQLUniqueConstraint _iter1012 : struct.uniqueConstraints)
+ for (SQLUniqueConstraint _iter1020 : struct.uniqueConstraints)
{
- _iter1012.write(oprot);
+ _iter1020.write(oprot);
}
}
}
if (struct.isSetNotNullConstraints()) {
{
oprot.writeI32(struct.notNullConstraints.size());
- for (SQLNotNullConstraint _iter1013 : struct.notNullConstraints)
+ for (SQLNotNullConstraint _iter1021 : struct.notNullConstraints)
{
- _iter1013.write(oprot);
+ _iter1021.write(oprot);
}
}
}
if (struct.isSetDefaultConstraints()) {
{
oprot.writeI32(struct.defaultConstraints.size());
- for (SQLDefaultConstraint _iter1014 : struct.defaultConstraints)
+ for (SQLDefaultConstraint _iter1022 : struct.defaultConstraints)
{
- _iter1014.write(oprot);
+ _iter1022.write(oprot);
}
}
}
if (struct.isSetCheckConstraints()) {
{
oprot.writeI32(struct.checkConstraints.size());
- for (SQLCheckConstraint _iter1015 : struct.checkConstraints)
+ for (SQLCheckConstraint _iter1023 : struct.checkConstraints)
{
- _iter1015.write(oprot);
+ _iter1023.write(oprot);
}
}
}
@@ -56252,84 +56244,84 @@ import org.slf4j.LoggerFactory;
}
if (incoming.get(1)) {
{
- org.apache.thrift.protocol.TList _list1016 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1016.size);
- SQLPrimaryKey _elem1017;
- for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018)
+ org.apache.thrift.protocol.TList _list1024 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1024.size);
+ SQLPrimaryKey _elem1025;
+ for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
{
- _elem1017 = new SQLPrimaryKey();
- _elem1017.read(iprot);
- struct.primaryKeys.add(_elem1017);
+ _elem1025 = new SQLPrimaryKey();
+ _elem1025.read(iprot);
+ struct.primaryKeys.add(_elem1025);
}
}
struct.setPrimaryKeysIsSet(true);
}
if (incoming.get(2)) {
{
- org.apache.thrift.protocol.TList _list1019 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1019.size);
- SQLForeignKey _elem1020;
- for (int _i1021 = 0; _i1021 < _list1019.size; ++_i1021)
+ org.apache.thrift.protocol.TList _list1027 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1027.size);
+ SQLForeignKey _elem1028;
+ for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029)
{
- _elem1020 = new SQLForeignKey();
- _elem1020.read(iprot);
- struct.foreignKeys.add(_elem1020);
+ _elem1028 = new SQLForeignKey();
+ _elem1028.read(iprot);
+ struct.foreignKeys.add(_elem1028);
}
}
struct.setForeignKeysIsSet(true);
}
if (incoming.get(3)) {
{
- org.apache.thrift.protocol.TList _list1022 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1022.size);
- SQLUniqueConstraint _elem1023;
- for (int _i1024 = 0; _i1024 < _list1022.size; ++_i1024)
+ org.apache.thrift.protocol.TList _list1030 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1030.size);
+ SQLUniqueConstraint _elem1031;
+ for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032)
{
- _elem1023 = new SQLUniqueConstraint();
- _elem1023.read(iprot);
- struct.uniqueConstraints.add(_elem1023);
+ _elem1031 = new SQLUniqueConstraint();
+ _elem1031.read(iprot);
+ struct.uniqueConstraints.add(_elem1031);
}
}
struct.setUniqueConstraintsIsSet(true);
}
if (incoming.get(4)) {
{
- org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1025.size);
- SQLNotNullConstraint _elem1026;
- for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027)
+ org.apache.thrift.protocol.TList _list1033 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1033.size);
+ SQLNotNullConstraint _elem1034;
+ for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035)
{
- _elem1026 = new SQLNotNullConstraint();
- _elem1026.read(iprot);
- struct.notNullConstraints.add(_elem1026);
+ _elem1034 = new SQLNotNullConstraint();
+ _elem1034.read(iprot);
+ struct.notNullConstraints.add(_elem1034);
}
}
struct.setNotNullConstraintsIsSet(true);
}
if (incoming.get(5)) {
{
- org.apache.thrift.protocol.TList _list1028 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1028.size);
- SQLDefaultConstraint _elem1029;
- for (int _i1030 = 0; _i1030 < _list1028.size; ++_i1030)
+ org.apache.thrift.protocol.TList _list1036 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1036.size);
+ SQLDefaultConstraint _elem1037;
+ for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038)
{
- _elem1029 = new SQLDefaultConstraint();
- _elem1029.read(iprot);
- struct.defaultConstraints.add(_elem1029);
+ _elem1037 = new SQLDefaultConstraint();
+ _elem1037.read(iprot);
+ struct.defaultConstraints.add(_elem1037);
}
}
struct.setDefaultConstraintsIsSet(true);
}
if (incoming.get(6)) {
{
- org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1031.size);
- SQLCheckConstraint _elem1032;
- for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033)
+ org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1039.size);
+ SQLCheckConstraint _elem1040;
+ for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041)
{
- _elem1032 = new SQLCheckConstraint();
- _elem1032.read(iprot);
- struct.checkConstraints.add(_elem1032);
+ _elem1040 = new SQLCheckConstraint();
+ _elem1040.read(iprot);
+ struct.checkConstraints.add(_elem1040);
}
}
struct.setCheckConstraintsIsSet(true);
@@ -65479,13 +65471,13 @@ import org.slf4j.LoggerFactory;
case 3: // PART_NAMES
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin();
- struct.partNames = new ArrayList<String>(_list1034.size);
- String _elem1035;
- for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036)
+ org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin();
+ struct.partNames = new ArrayList<String>(_list1042.size);
+ String _elem1043;
+ for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044)
{
- _elem1035 = iprot.readString();
- struct.partNames.add(_elem1035);
+ _elem1043 = iprot.readString();
+ struct.partNames.add(_elem1043);
}
iprot.readListEnd();
}
@@ -65521,9 +65513,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
- for (String _iter1037 : struct.partNames)
+ for (String _iter1045 : struct.partNames)
{
- oprot.writeString(_iter1037);
+ oprot.writeString(_iter1045);
}
oprot.writeListEnd();
}
@@ -65566,9 +65558,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetPartNames()) {
{
oprot.writeI32(struct.partNames.size());
- for (String _iter1038 : struct.partNames)
+ for (String _iter1046 : struct.partNames)
{
- oprot.writeString(_iter1038);
+ oprot.writeString(_iter1046);
}
}
}
@@ -65588,13 +65580,13 @@ import org.slf4j.LoggerFactory;
}
if (incoming.get(2)) {
{
- org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.partNames = new ArrayList<String>(_list1039.size);
- String _elem1040;
- for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041)
+ org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.partNames = new ArrayList<String>(_list1047.size);
+ String _elem1048;
+ for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049)
{
- _elem1040 = iprot.readString();
- struct.partNames.add(_elem1040);
+ _elem1048 = iprot.readString();
+ struct.partNames.add(_elem1048);
}
}
struct.setPartNamesIsSet(true);
@@ -66819,13 +66811,13 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin();
- struct.success = new ArrayList<String>(_list1042.size);
- String _elem1043;
- for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044)
+ org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin();
+ struct.success = new ArrayList<String>(_list1050.size);
+ String _elem1051;
+ for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052)
{
- _elem1043 = iprot.readString();
- struct.success.add(_elem1043);
+ _elem1051 = iprot.readString();
+ struct.success.add(_elem1051);
}
iprot.readListEnd();
}
@@ -66860,9 +66852,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
- for (String _iter1045 : struct.success)
+ for (String _iter1053 : struct.success)
{
- oprot.writeString(_iter1045);
+ oprot.writeString(_iter1053);
}
oprot.writeListEnd();
}
@@ -66901,9 +66893,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (String _iter1046 : struct.success)
+ for (String _iter1054 : struct.success)
{
- oprot.writeString(_iter1046);
+ oprot.writeString(_iter1054);
}
}
}
@@ -66918,13 +66910,13 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.success = new ArrayList<String>(_list1047.size);
- String _elem1048;
- for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049)
+ org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.success = new ArrayList<String>(_list1055.size);
+ String _elem1056;
+ for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
{
- _elem1048 = iprot.readString();
- struct.success.add(_elem1048);
+ _elem1056 = iprot.readString();
+ struct.success.add(_elem1056);
}
}
struct.setSuccessIsSet(true);
@@ -67898,13 +67890,13 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin();
- struct.success = new ArrayList<String>(_list1050.size);
- String _elem1051;
- for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052)
+ org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin();
+ struct.success = new ArrayList<String>(_list1058.size);
+ String _elem1059;
+ for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060)
{
- _elem1051 = iprot.readString();
- struct.success.add(_elem1051);
+ _elem1059 = iprot.readString();
+ struct.success.add(_elem1059);
}
iprot.readListEnd();
}
@@ -67939,9 +67931,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
- for (String _iter1053 : struct.success)
+ for (String _iter1061 : struct.success)
{
- oprot.writeString(_iter1053);
+ oprot.writeString(_iter1061);
}
oprot.writeListEnd();
}
@@ -67980,9 +67972,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (String _iter1054 : struct.success)
+ for (String _iter1062 : struct.success)
{
- oprot.writeString(_iter1054);
+ oprot.writeString(_iter1062);
}
}
}
@@ -67997,13 +67989,13 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.success = new ArrayList<String>(_list1055.size);
- String _elem1056;
- for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
+ org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.success = new ArrayList<String>(_list1063.size);
+ String _elem1064;
+ for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
{
- _elem1056 = iprot.readString();
- struct.success.add(_elem1056);
+ _elem1064 = iprot.readString();
+ struct.success.add(_elem1064);
}
}
struct.setSuccessIsSet(true);
@@ -68769,13 +68761,13 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin();
- struct.success = new ArrayList<String>(_list1058.size);
- String _elem1059;
- for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060)
+ org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
+ struct.success = new ArrayList<String>(_list1066.size);
+ String _elem1067;
+ for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
{
- _elem1059 = iprot.readString();
- struct.success.add(_elem1059);
+ _elem1067 = iprot.readString();
+ struct.success.add(_elem1067);
}
iprot.readListEnd();
}
@@ -68810,9 +68802,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
- for (String _iter1061 : struct.success)
+ for (String _iter1069 : struct.success)
{
- oprot.writeString(_iter1061);
+ oprot.writeString(_iter1069);
}
oprot.writeListEnd();
}
@@ -68851,9 +68843,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (String _iter1062 : struct.success)
+ for (String _iter1070 : struct.success)
{
- oprot.writeString(_iter1062);
+ oprot.writeString(_iter1070);
}
}
}
@@ -68868,13 +68860,13 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.success = new ArrayList<String>(_list1063.size);
- String _elem1064;
- for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
+ org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.success = new ArrayList<String>(_list1071.size);
+ String _elem1072;
+ for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
{
- _elem1064 = iprot.readString();
- struct.success.add(_elem1064);
+ _elem1072 = iprot.readString();
+ struct.success.add(_elem1072);
}
}
struct.setSuccessIsSet(true);
@@ -69379,13 +69371,13 @@ import org.slf4j.LoggerFactory;
case 3: // TBL_TYPES
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
- struct.tbl_types = new ArrayList<String>(_list1066.size);
- String _elem1067;
- for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
+ org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
+ struct.tbl_types = new ArrayList<String>(_list1074.size);
+ String _elem1075;
+ for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
{
- _elem1067 = iprot.readString();
- struct.tbl_types.add(_elem1067);
+ _elem1075 = iprot.readString();
+ struct.tbl_types.add(_elem1075);
}
iprot.readListEnd();
}
@@ -69421,9 +69413,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
- for (String _iter1069 : struct.tbl_types)
+ for (String _iter1077 : struct.tbl_types)
{
- oprot.writeString(_iter1069);
+ oprot.writeString(_iter1077);
}
oprot.writeListEnd();
}
@@ -69466,9 +69458,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetTbl_types()) {
{
oprot.writeI32(struct.tbl_types.size());
- for (String _iter1070 : struct.tbl_types)
+ for (String _iter1078 : struct.tbl_types)
{
- oprot.writeString(_iter1070);
+ oprot.writeString(_iter1078);
}
}
}
@@ -69488,13 +69480,13 @@ import org.slf4j.LoggerFactory;
}
if (incoming.get(2)) {
{
- org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.tbl_types = new ArrayList<String>(_list1071.size);
- String _elem1072;
- for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
+ org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.tbl_types = new ArrayList<String>(_list1079.size);
+ String _elem1080;
+ for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
{
- _elem1072 = iprot.readString();
- struct.tbl_types.add(_elem1072);
+ _elem1080 = iprot.readString();
+ struct.tbl_types.add(_elem1080);
}
}
struct.setTbl_typesIsSet(true);
@@ -69900,14 +69892,14 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
- struct.success = new ArrayList<TableMeta>(_list1074.size);
- TableMeta _elem1075;
- for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
+ org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
+ struct.success = new ArrayList<TableMeta>(_list1082.size);
+ TableMeta _elem1083;
+ for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
{
- _elem1075 = new TableMeta();
- _elem1075.read(iprot);
- struct.success.add(_elem1075);
+ _elem1083 = new TableMeta();
+ _elem1083.read(iprot);
+ struct.success.add(_elem1083);
}
iprot.readListEnd();
}
@@ -69942,9 +69934,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (TableMeta _iter1077 : struct.success)
+ for (TableMeta _iter1085 : struct.success)
{
- _iter1077.write(oprot);
+ _iter1085.write(oprot);
}
oprot.writeListEnd();
}
@@ -69983,9 +69975,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (TableMeta _iter1078 : struct.success)
+ for (TableMeta _iter1086 : struct.success)
{
- _iter1078.write(oprot);
+ _iter1086.write(oprot);
}
}
}
@@ -70000,14 +69992,14 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new ArrayList<TableMeta>(_list1079.size);
- TableMeta _elem1080;
- for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
+ org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.success = new ArrayList<TableMeta>(_list1087.size);
+ TableMeta _elem1088;
+ for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
{
- _elem1080 = new TableMeta();
- _elem1080.read(iprot);
- struct.success.add(_elem1080);
+ _elem1088 = new TableMeta();
+ _elem1088.read(iprot);
+ struct.success.add(_elem1088);
}
}
struct.setSuccessIsSet(true);
@@ -70773,13 +70765,13 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
- struct.success = new ArrayList<String>(_list1082.size);
- String _elem1083;
- for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
+ org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
+ struct.success = new ArrayList<String>(_list1090.size);
+ String _elem1091;
+ for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
{
- _elem1083 = iprot.readString();
- struct.success.add(_elem1083);
+ _elem1091 = iprot.readString();
+ struct.success.add(_elem1091);
}
iprot.readListEnd();
}
@@ -70814,9 +70806,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
- for (String _iter1085 : struct.success)
+ for (String _iter1093 : struct.success)
{
- oprot.writeString(_iter1085);
+ oprot.writeString(_iter1093);
}
oprot.writeListEnd();
}
@@ -70855,9 +70847,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (String _iter1086 : struct.success)
+ for (String _iter1094 : struct.success)
{
- oprot.writeString(_iter1086);
+ oprot.writeString(_iter1094);
}
}
}
@@ -70872,13 +70864,13 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.success = new ArrayList<String>(_list1087.size);
- String _elem1088;
- for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
+ org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.success = new ArrayList<String>(_list1095.size);
+ String _elem1096;
+ for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
{
- _elem1088 = iprot.readString();
- struct.success.add(_elem1088);
+ _elem1096 = iprot.readString();
+ struct.success.add(_elem1096);
}
}
struct.setSuccessIsSet(true);
@@ -72331,13 +72323,13 @@ import org.slf4j.LoggerFactory;
case 2: // TBL_NAMES
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
- struct.tbl_names = new ArrayList<String>(_list1090.size);
- String _elem1091;
- for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
+ org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
+ struct.tbl_names = new ArrayList<String>(_list1098.size);
+ String _elem1099;
+ for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
{
- _elem1091 = iprot.readString();
- struct.tbl_names.add(_elem1091);
+ _elem1099 = iprot.readString();
+ struct.tbl_names.add(_elem1099);
}
iprot.readListEnd();
}
@@ -72368,9 +72360,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
- for (String _iter1093 : struct.tbl_names)
+ for (String _iter1101 : struct.tbl_names)
{
- oprot.writeString(_iter1093);
+ oprot.writeString(_iter1101);
}
oprot.writeListEnd();
}
@@ -72407,9 +72399,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetTbl_names()) {
{
oprot.writeI32(struct.tbl_names.size());
- for (String _iter1094 : struct.tbl_names)
+ for (String _iter1102 : struct.tbl_names)
{
- oprot.writeString(_iter1094);
+ oprot.writeString(_iter1102);
}
}
}
@@ -72425,13 +72417,13 @@ import org.slf4j.LoggerFactory;
}
if (incoming.get(1)) {
{
- org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.tbl_names = new ArrayList<String>(_list1095.size);
- String _elem1096;
- for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
+ org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.tbl_names = new ArrayList<String>(_list1103.size);
+ String _elem1104;
+ for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
{
- _elem1096 = iprot.readString();
- struct.tbl_names.add(_elem1096);
+ _elem1104 = iprot.readString();
+ struct.tbl_names.add(_elem1104);
}
}
struct.setTbl_namesIsSet(true);
@@ -72756,14 +72748,14 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
- struct.success = new ArrayList<Table>(_list1098.size);
- Table _elem1099;
- for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
+ org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
+ struct.success = new ArrayList<Table>(_list1106.size);
+ Table _elem1107;
+ for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
{
- _elem1099 = new Table();
- _elem1099.read(iprot);
- struct.success.add(_elem1099);
+ _elem1107 = new Table();
+ _elem1107.read(iprot);
+ struct.success.add(_elem1107);
}
iprot.readListEnd();
}
@@ -72789,9 +72781,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (Table _iter1101 : struct.success)
+ for (Table _iter1109 : struct.success)
{
- _iter1101.write(oprot);
+ _iter1109.write(oprot);
}
oprot.writeListEnd();
}
@@ -72822,9 +72814,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (Table _iter1102 : struct.success)
+ for (Table _iter1110 : struct.success)
{
- _iter1102.write(oprot);
+ _iter1110.write(oprot);
}
}
}
@@ -72836,14 +72828,14 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(1);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new ArrayList<Table>(_list1103.size);
- Table _elem1104;
- for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
+ org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+ struct.success = new ArrayList<Table>(_list1111.size);
+ Table _elem1112;
+ for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
{
- _elem1104 = new Table();
- _elem1104.read(iprot);
- struct.success.add(_elem1104);
+ _elem1112 = new Table();
+ _elem1112.read(iprot);
+ struct.success.add(_elem1112);
}
}
struct.setSuccessIsSet(true);
@@ -75236,13 +75228,13 @@ import org.slf4j.LoggerFactory;
case 2: // TBL_NAMES
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
- struct.tbl_names = new ArrayList<String>(_list1106.size);
- String _elem1107;
- for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
+ org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
+ struct.tbl_names = new ArrayList<String>(_list1114.size);
+ String _elem1115;
+ for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
{
- _elem1107 = iprot.readString();
- struct.tbl_names.add(_elem1107);
+ _elem1115 = iprot.readString();
+ struct.tbl_names.add(_elem1115);
}
iprot.readListEnd();
}
@@ -75273,9 +75265,9 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
{
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
- for (String _iter1109 : struct.tbl_names)
+ for (String _iter1117 : struct.tbl_names)
{
- oprot.writeString(_iter1109);
+ oprot.writeString(_iter1117);
}
oprot.writeListEnd();
}
@@ -75312,9 +75304,9 @@ import org.slf4j.LoggerFactory;
if (struct.isSetTbl_names()) {
{
oprot.writeI32(struct.tbl_names.size());
- for (String _iter1110 : struct.tbl_names)
+ for (String _iter1118 : struct.tbl_names)
{
- oprot.writeString(_iter1110);
+ oprot.writeString(_iter1118);
}
}
}
@@ -75330,13 +75322,13 @@ import org.slf4j.LoggerFactory;
}
if (incoming.get(1)) {
{
- org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
- struct.tbl_names = new ArrayList<String>(_list1111.size);
- String _elem1112;
- for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
+ org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+ struct.tbl_names = new ArrayList<String>(_list1119.size);
+ String _elem1120;
+ for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
{
- _elem1112 = iprot.readString();
- struct.tbl_names.add(_elem1112);
+ _elem1120 = iprot.readString();
+ struct.tbl_names.add(_elem1120);
}
}
struct.setTbl_namesIsSet(true);
@@ -75909,16 +75901,16 @@ import org.slf4j.LoggerFactory;
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
{
- org.apache.thrift.protocol.TMap _map1114 = iprot.readMapBegin();
- struct.success = new HashMap<String,Materialization>(2*_map1114.size);
- String _key1115;
- Materialization _val1116;
- for (int _i1117 = 0; _i1117 < _map1114.size; ++_i1117)
+ org.apache.thrift.protocol.TMap _map1122 = iprot.readMapBegin();
+ struct.success = new HashMap<String,Materialization>(2*_map1122.size);
+ String _key1123;
+ Materialization _val1124;
+ for (int _i1125 = 0; _i1125 < _map1122.size; ++_i1125)
{
- _key1115 = iprot.readString();
- _val1116 = new Materialization();
- _val1116.read(iprot);
- struct.success.put(_key1115, _val1116);
+ _key1123 = iprot.readString();
+ _val1124 = new Materialization();
+ _val1124.read(iprot);
+ struct.success.put(_key1123, _val1124);
}
iprot.readMapEnd();
}
@@ -75971,10 +75963,10 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
{
oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
- for (Map.Entry<String, Materialization> _iter1118 : struct.success.entrySet())
+ for (Map.Entry<String, Materialization> _iter1126 : struct.success.entrySet())
{
- oprot.writeString(_iter1118.getKey());
- _iter1118.getValue().write(oprot);
+ oprot.writeString(_iter1126.getKey());
+ _iter1126.getValue().write(oprot);
}
oprot.writeMapEnd();
}
@@ -76029,10 +76021,10 @@ import org.slf4j.LoggerFactory;
if (struct.isSetSuccess()) {
{
oprot.writeI32(struct.success.size());
- for (Map.Entry<String, Materialization> _iter1119 : struct.success.entrySet())
+ for (Map.Entry<String, Materialization> _iter1127 : struct.success.entrySet())
{
- oprot.writeString(_iter1119.getKey());
- _iter1119.getValue().write(oprot);
+ oprot.writeString(_iter1127.getKey());
+ _iter1127.getValue().write(oprot);
}
}
}
@@ -76053,16 +76045,16 @@ import org.slf4j.LoggerFactory;
BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
{
- org.apache.thrift.protocol.TMap _map1120 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
- struct.success = new HashMap<String,Materialization>(2*_map1120.size);
- String _key1121;
<TRUNCATED>
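The ThriftHiveMetastore.java hunks above only shift the autogenerated local counters (_list, _elem, _i, _iter, _key, _val) that Thrift emits when the file is regenerated; the read/write pattern itself is unchanged. A minimal sketch of that list-deserialization pattern, assuming a standard libthrift TProtocol on the classpath (this is an illustrative sketch, not the generated code itself):

import java.util.ArrayList;
import java.util.List;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

// Sketch of the generated list-deserialization pattern seen in the hunks above.
// In the regenerated ThriftHiveMetastore.java only the numbered temporaries
// changed (e.g. _list1028 -> _list1036); the loop structure is identical.
public class ThriftListReadSketch {
  // Reads a list<string> field the same way the generated scheme readers do.
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList listMeta = iprot.readListBegin();           // element type + size
    List<String> out = new ArrayList<String>(listMeta.size);
    for (int i = 0; i < listMeta.size; ++i) {
      out.add(iprot.readString());                    // one element per iteration
    }
    iprot.readListEnd();
    return out;
  }
}

The generator inlines this loop per field with freshly numbered temporaries, which is why an unrelated interface change renumbers every hunk shown above.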
[09/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
index e81ae06..617b873 100644
--- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a
-POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a
@@ -42,7 +42,7 @@ STAGE PLANS:
alias: orc_merge5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), subtype (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -81,22 +81,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -112,22 +112,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -143,22 +143,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -174,22 +174,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -215,8 +215,8 @@ POSTHOOK: Input: default@orc_merge5a
POSTHOOK: Output: default@orc_merge5a
POSTHOOK: Output: default@orc_merge5a@st=0.8
Found 4 items
--rw-r--r-- 3 ### USER ### ### GROUP ### 613 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-r--r-- 3 ### USER ### ### GROUP ### 613 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 614 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-r--r-- 3 ### USER ### ### GROUP ### 614 ### HDFS DATE ### hdfs://### HDFS PATH ###
-rw-r--r-- 3 ### USER ### ### GROUP ### 614 ### HDFS DATE ### hdfs://### HDFS PATH ###
-rw-r--r-- 3 ### USER ### ### GROUP ### 614 ### HDFS DATE ### hdfs://### HDFS PATH ###
Found 4 items
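For context on the decimal1 widening above and the [DECIMAL_64] feature lines in the Map Vectorization hunks that follow: ORC's decimal64 representation stores a decimal whose precision is at most 18 digits as a scaled 64-bit long, so decimal(10,0) values can qualify for that path while decimal(38,0) values stay on the HiveDecimal path. A minimal sketch of the scaled-long idea in plain Java (deliberately not Hive's or ORC's internal API):

import java.math.BigDecimal;

// Sketch of the "decimal64" idea behind the [DECIMAL_64] feature flag:
// a decimal with precision <= 18 fits losslessly in a scaled 64-bit long.
public class Decimal64Sketch {
  static final int MAX_DECIMAL64_PRECISION = 18;

  // Returns the scaled-long encoding, or throws if the value needs more
  // precision than a 64-bit long can carry (as a decimal(38,0) column may).
  static long toDecimal64(BigDecimal value, int scale) {
    BigDecimal scaled = value.setScale(scale);
    if (scaled.precision() > MAX_DECIMAL64_PRECISION) {
      throw new ArithmeticException("precision > 18, not representable as decimal64");
    }
    return scaled.unscaledValue().longValueExact();
  }

  public static void main(String[] args) {
    System.out.println(toDecimal64(new BigDecimal("12345.67"), 2)); // prints 1234567
    // A 38-digit value such as decimal(38,0) data would fail the precision
    // check above, which is why such columns do not use the decimal64 path.
  }
}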
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
index 6bd0a3f..c41dba9 100644
--- a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
@@ -329,8 +329,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -433,8 +433,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -636,8 +636,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -814,8 +814,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -919,8 +919,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1123,8 +1123,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1292,8 +1292,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1405,8 +1405,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1574,8 +1574,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1679,8 +1679,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1849,8 +1849,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1912,8 +1912,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2017,8 +2017,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2068,8 +2068,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2299,8 +2299,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2350,8 +2350,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2574,8 +2574,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2677,8 +2677,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2880,8 +2880,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3057,8 +3057,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3161,8 +3161,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3364,8 +3364,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3542,8 +3542,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3648,8 +3648,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3816,8 +3816,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3921,8 +3921,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4126,8 +4126,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4330,8 +4330,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4508,8 +4508,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4614,8 +4614,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5076,8 +5076,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5274,8 +5274,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5377,8 +5377,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5545,8 +5545,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5649,8 +5649,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5798,8 +5798,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -5857,8 +5857,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6050,8 +6050,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6153,8 +6153,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6304,8 +6304,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6367,8 +6367,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6472,8 +6472,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6523,8 +6523,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6704,8 +6704,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6805,8 +6805,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8917,8 +8917,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9160,8 +9160,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9395,8 +9395,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9467,8 +9467,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9753,8 +9753,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -9994,8 +9994,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10228,8 +10228,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10462,8 +10462,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -10968,8 +10968,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11237,8 +11237,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11427,8 +11427,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11564,8 +11564,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11636,8 +11636,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -11912,8 +11912,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -12014,8 +12014,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
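The same golden-file change repeats across the Spark client-positive results that follow: with the upgraded ORC reader, the Map Vectorization summary now reports DECIMAL_64 in both inputFormatFeatureSupport and featureSupportInUse. A minimal sketch of how the summary can be reproduced for one of the ORC tables exercised later in this diff (table t1_n48 and its columns `dec` and value_dec are taken from the test; everything else is an assumption):
-- Expected in the Map Vectorization section of the plan after this change:
--   inputFormatFeatureSupport: [DECIMAL_64]
--   featureSupportInUse: [DECIMAL_64]
EXPLAIN VECTORIZATION DETAIL
SELECT t1_n48.`dec`, t1_n48.value_dec
FROM t1_n48;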
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
index 9f5fa2a..8390a6a 100644
--- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
@@ -77,8 +77,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -180,8 +180,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -277,8 +277,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -380,8 +380,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -477,8 +477,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -570,8 +570,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -663,8 +663,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -766,8 +766,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1119,8 +1119,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1255,8 +1255,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1391,8 +1391,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1527,8 +1527,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
index 5932c0a..3e74a8f 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
@@ -169,8 +169,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_char_4.q.out b/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
index 96b829e..c6b8203 100644
--- a/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_char_4.q.out
@@ -172,8 +172,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
index 1444cd8..1cf0724 100644
--- a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
@@ -1287,8 +1287,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
index 65f070f..fc44d8b 100644
--- a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out
@@ -254,8 +254,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -391,8 +391,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index c5d0214..e3d815b 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -109,8 +109,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -287,8 +287,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
index 735c4dc..188546c 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_mapjoin.q.out
@@ -128,8 +128,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -202,8 +202,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -339,6 +339,17 @@ POSTHOOK: Input: default@t2_n29
9.00 9
9.00 9
9.00 9
+PREHOOK: query: select count(*) from (select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n48
+PREHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+106
PREHOOK: query: explain vectorization detail
select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)
PREHOOK: type: QUERY
@@ -393,8 +404,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -467,8 +478,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -604,6 +615,17 @@ POSTHOOK: Input: default@t2_n29
9.00 48.96 9 5
9.00 48.96 9 7
9.00 48.96 9 7
+PREHOOK: query: select count(*) from (select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n48
+PREHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+106
PREHOOK: query: CREATE TABLE over1k_small(t tinyint,
si smallint,
i int,
@@ -840,6 +862,17 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 89
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -1000,6 +1033,17 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 15.09 89 15
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -1162,6 +1206,17 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 89
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -1324,3 +1379,14 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 15.09 89 15
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
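The hunks above also add plain count(*) cross-checks to vector_decimal_mapjoin.q.out, so the join cardinality itself is asserted under the new reader rather than only the sampled rows. A hedged sketch of repeating the same check with the Decimal64 treatment switched off for comparison; hive.vectorized.input.format.supports.enabled is assumed to be the property that gates DECIMAL_64 support on this branch, so verify the name and accepted values before relying on it:
-- Assumed property; with it set to none the plan should fall back to
-- featureSupportInUse: [] while the count stays the same.
set hive.vectorized.input.format.supports.enabled=none;
SELECT count(*)
FROM (SELECT t1_small.`dec`, t2_small.`dec`
      FROM t1_small JOIN t2_small ON (t1_small.`dec` = t2_small.`dec`)) AS t;
set hive.vectorized.input.format.supports.enabled=decimal_64;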
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
index f9d0272..a40484f 100644
--- a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
@@ -162,8 +162,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_elt.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_elt.q.out b/ql/src/test/results/clientpositive/spark/vector_elt.q.out
index b938d8c..db00391 100644
--- a/ql/src/test/results/clientpositive/spark/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_elt.q.out
@@ -62,8 +62,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -172,8 +172,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
index 02c7c50..bbce14c 100644
--- a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
@@ -165,8 +165,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
index 168aa77..fecc962 100644
--- a/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_inner_join.q.out
@@ -86,8 +86,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -168,8 +168,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -266,8 +266,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -340,8 +340,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -462,8 +462,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -544,8 +544,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -630,8 +630,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -705,8 +705,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -791,8 +791,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -875,8 +875,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -961,8 +961,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1044,8 +1044,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1130,8 +1130,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1213,8 +1213,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1299,8 +1299,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1382,8 +1382,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1468,8 +1468,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1551,8 +1551,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out b/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
index 858edfa..9f8dea3 100644
--- a/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_left_outer_join.q.out
@@ -47,8 +47,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -72,8 +72,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -128,8 +128,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
index 89c14d5..2b0a1e7 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_reference_windowed.q.out
@@ -271,7 +271,7 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: c1 (type: decimal(15,2))
outputColumnNames: c1
@@ -283,7 +283,7 @@ STAGE PLANS:
Group By Operator
aggregations: sum(c1)
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 0:decimal(15,2)) -> decimal(25,2)
+ aggregators: VectorUDAFSumDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> decimal(25,2)
className: VectorGroupByOperator
groupByMode: HASH
native: false
@@ -308,8 +308,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -317,7 +316,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -489,7 +488,7 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: c1 (type: decimal(15,2)), c2 (type: decimal(15,2))
outputColumnNames: c1, c2
@@ -501,10 +500,10 @@ STAGE PLANS:
Group By Operator
aggregations: sum(c1)
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 0:decimal(15,2)) -> decimal(25,2)
+ aggregators: VectorUDAFSumDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> decimal(25,2)
className: VectorGroupByOperator
groupByMode: HASH
- keyExpressions: col 0:decimal(15,2), col 1:decimal(15,2)
+ keyExpressions: ConvertDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2), ConvertDecimal64ToDecimal(col 1:decimal(15,2)/DECIMAL_64) -> 4:decimal(15,2)
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
@@ -530,8 +529,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -539,9 +537,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(15,2), decimal(15,2)]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -726,12 +724,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(15,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(15,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -760,8 +758,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -769,9 +766,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(15,2)]
Map 5
Map Operator Tree:
TableScan
@@ -779,12 +776,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(15,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(15,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -812,8 +809,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -821,9 +817,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(15,2)]
Reducer 2
Execution mode: llap
Reduce Operator Tree:
@@ -1035,12 +1031,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(15,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(15,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -1068,8 +1064,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1077,9 +1072,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(15,2)]
Map 5
Map Operator Tree:
TableScan
@@ -1087,12 +1082,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(15,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(15,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -1121,8 +1116,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1130,9 +1124,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(15,2)]
Reducer 2
Execution mode: llap
Reduce Operator Tree:
@@ -1343,12 +1337,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(15,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(15,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -1377,8 +1371,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1386,9 +1379,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(15,2)]
Map 4
Map Operator Tree:
TableScan
@@ -1396,12 +1389,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(15,2), 1:c2:decimal(15,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(15,2)/DECIMAL_64, 1:c2:decimal(15,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(15,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(15,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(15,2)/DECIMAL_64) -> 3:decimal(15,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -1430,8 +1423,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1439,9 +1431,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(15,2), c2:decimal(15,2)
+ dataColumns: c1:decimal(15,2)/DECIMAL_64, c2:decimal(15,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(15,2)]
Reducer 2
Execution mode: llap
Reduce Operator Tree:
@@ -1571,7 +1563,7 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: c1 (type: decimal(7,2))
outputColumnNames: c1
@@ -1583,7 +1575,7 @@ STAGE PLANS:
Group By Operator
aggregations: sum(c1)
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 0:decimal(7,2)) -> decimal(17,2)
+ aggregators: VectorUDAFSumDecimal64(col 0:decimal(7,2)/DECIMAL_64) -> decimal(17,2)/DECIMAL_64
className: VectorGroupByOperator
groupByMode: HASH
native: false
@@ -1608,8 +1600,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1617,7 +1608,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1789,7 +1780,7 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: c1 (type: decimal(7,2)), c2 (type: decimal(7,2))
outputColumnNames: c1, c2
@@ -1801,10 +1792,10 @@ STAGE PLANS:
Group By Operator
aggregations: sum(c1)
Group By Vectorization:
- aggregators: VectorUDAFSumDecimal(col 0:decimal(7,2)) -> decimal(17,2)
+ aggregators: VectorUDAFSumDecimal64(col 0:decimal(7,2)/DECIMAL_64) -> decimal(17,2)/DECIMAL_64
className: VectorGroupByOperator
groupByMode: HASH
- keyExpressions: col 0:decimal(7,2), col 1:decimal(7,2)
+ keyExpressions: ConvertDecimal64ToDecimal(col 0:decimal(7,2)/DECIMAL_64) -> 3:decimal(7,2), ConvertDecimal64ToDecimal(col 1:decimal(7,2)/DECIMAL_64) -> 4:decimal(7,2)
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
@@ -1830,8 +1821,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1839,9 +1829,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(7,2), decimal(7,2)]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -2026,12 +2016,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(7,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(7,2)/DECIMAL_64) -> 3:decimal(7,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -2060,8 +2050,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2069,9 +2058,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(7,2)]
Map 5
Map Operator Tree:
TableScan
@@ -2079,12 +2068,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(7,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(7,2)/DECIMAL_64) -> 3:decimal(7,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -2112,8 +2101,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2121,9 +2109,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(7,2)]
Reducer 2
Execution mode: llap
Reduce Operator Tree:
@@ -2335,12 +2323,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(7,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(7,2)/DECIMAL_64) -> 3:decimal(7,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -2368,8 +2356,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2377,9 +2364,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(7,2)]
Map 5
Map Operator Tree:
TableScan
@@ -2387,12 +2374,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(7,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(7,2)/DECIMAL_64) -> 3:decimal(7,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -2421,8 +2408,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2430,9 +2416,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(7,2)]
Reducer 2
Execution mode: llap
Reduce Operator Tree:
@@ -2643,12 +2629,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(7,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(7,2)/DECIMAL_64) -> 3:decimal(7,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -2677,8 +2663,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2686,9 +2671,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(7,2)]
Map 4
Map Operator Tree:
TableScan
@@ -2696,12 +2681,12 @@ STAGE PLANS:
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:c1:decimal(7,2), 1:c2:decimal(7,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:c1:decimal(7,2)/DECIMAL_64, 1:c2:decimal(7,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(7,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(7,2)/DECIMAL_64) -> 3:decimal(7,2))
predicate: c1 is not null (type: boolean)
Statistics: Num rows: 4 Data size: 896 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -2730,8 +2715,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2739,9 +2723,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: c1:decimal(7,2), c2:decimal(7,2)
+ dataColumns: c1:decimal(7,2)/DECIMAL_64, c2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(7,2)]
Reducer 2
Execution mode: llap
Reduce Operator Tree:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out b/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
index 068453f..bf8e2d8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_partition_diff_num_cols.q.out
@@ -134,8 +134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -304,8 +304,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -474,8 +474,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -631,8 +631,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -788,8 +788,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
index 1de9ed4..1bbb9d0 100644
--- a/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_partitioned_date_time.q.out
@@ -297,8 +297,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -475,8 +475,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -969,8 +969,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1204,8 +1204,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1406,8 +1406,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1924,8 +1924,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2159,8 +2159,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2361,8 +2361,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_ptf_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_ptf_1.q.out b/ql/src/test/results/clientpositive/llap/vector_ptf_1.q.out
index 568549d..ef4934e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_ptf_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_ptf_1.q.out
@@ -111,8 +111,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
index ec8611d..2471c5d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_ptf_part_simple.q.out
@@ -153,8 +153,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -413,8 +413,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -642,8 +642,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -872,8 +872,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1134,8 +1134,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1364,8 +1364,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1595,8 +1595,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1858,8 +1858,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2089,8 +2089,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2312,8 +2312,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2540,8 +2540,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2768,8 +2768,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2966,8 +2966,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3196,8 +3196,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3426,8 +3426,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3625,8 +3625,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3856,8 +3856,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4087,8 +4087,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4326,8 +4326,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4555,8 +4555,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4804,8 +4804,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5033,8 +5033,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5256,8 +5256,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5456,8 +5456,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5657,8 +5657,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5827,8 +5827,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6157,8 +6157,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6357,8 +6357,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6559,8 +6559,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6760,8 +6760,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
index 0a3127c..a3ad696 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce1.q.out
@@ -151,8 +151,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
index afe443b..8ff51ac 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce2.q.out
@@ -151,8 +151,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
index f0faa56..e26c8b2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce3.q.out
@@ -151,8 +151,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
index c92c5cd..b241f30 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_decimal.q.out
@@ -89,8 +89,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
index 1ed694d..bda96da 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
@@ -128,8 +128,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_retry_failure.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_retry_failure.q.out b/ql/src/test/results/clientpositive/llap/vector_retry_failure.q.out
index 64e158e..c2342b2 100644
--- a/ql/src/test/results/clientpositive/llap/vector_retry_failure.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_retry_failure.q.out
@@ -80,8 +80,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out b/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out
index 8fb0752..c95d08a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_reuse_scratchcols.q.out
@@ -139,8 +139,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -346,8 +346,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
index 284b57f..38d9172 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_concat.q.out
@@ -155,8 +155,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -379,8 +379,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out b/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
index 8e55ed3..ff84fe8 100644
--- a/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_string_decimal.q.out
@@ -70,7 +70,7 @@ STAGE PLANS:
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterDoubleColumnInList(col 2:double, values [1.0E8, 2.0E8])(children: CastDecimalToDouble(col 0:decimal(18,0)) -> 2:double)
+ predicateExpression: FilterDoubleColumnInList(col 3:double, values [1.0E8, 2.0E8])(children: CastDecimalToDouble(col 2:decimal(18,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,0)/DECIMAL_64) -> 2:decimal(18,0)) -> 3:double)
predicate: (UDFToDouble(id)) IN (1.0E8D, 2.0E8D) (type: boolean)
Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -96,8 +96,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out b/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
index f210b72..671d020 100644
--- a/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_struct_in.q.out
@@ -94,8 +94,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -211,8 +211,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -352,8 +352,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -469,8 +469,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -610,8 +610,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -727,8 +727,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -871,8 +871,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -991,8 +991,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf1.q.out b/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
index 9e6e8e5..aef23fd 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf1.q.out
@@ -95,8 +95,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -196,8 +196,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -297,8 +297,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -398,8 +398,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -499,8 +499,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -600,8 +600,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -701,8 +701,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -802,8 +802,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -903,8 +903,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1004,8 +1004,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1105,8 +1105,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1206,8 +1206,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1307,8 +1307,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1408,8 +1408,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1509,8 +1509,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1610,8 +1610,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1711,8 +1711,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1812,8 +1812,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1913,8 +1913,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -2014,8 +2014,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -2115,8 +2115,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2214,8 +2214,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -2311,8 +2311,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -2408,8 +2408,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -2507,8 +2507,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2608,8 +2608,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2811,8 +2811,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2954,8 +2954,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_udf2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf2.q.out b/ql/src/test/results/clientpositive/llap/vector_udf2.q.out
index 8e3ccc9..58c60d1 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf2.q.out
@@ -94,8 +94,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -211,8 +211,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -298,8 +298,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out b/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out
index 3bc9806..1c96cd6 100644
--- a/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_udf_adaptor_1.q.out
@@ -314,8 +314,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -490,8 +490,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -822,8 +822,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -998,8 +998,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
index 70f354c..6d9057c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_4.q.out
@@ -174,8 +174,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
index 117246e..90a0869 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_mapjoin1.q.out
@@ -177,8 +177,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -206,8 +206,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -294,8 +294,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -332,8 +332,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -422,8 +422,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -460,8 +460,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
index 5e798db..149d20a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_varchar_simple.q.out
@@ -91,8 +91,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -211,8 +211,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -345,8 +345,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out b/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
index 4474a0f..7a50163 100644
--- a/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_when_case_null.q.out
@@ -83,8 +83,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
index 428ee8d..a5d6167 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing.q.out
@@ -56,8 +56,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -259,8 +258,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -467,8 +465,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -646,8 +643,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -841,8 +837,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1043,8 +1038,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1251,8 +1245,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1296,8 +1289,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1537,8 +1529,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1697,8 +1688,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1874,8 +1864,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2053,8 +2042,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2244,8 +2232,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2411,8 +2398,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2578,8 +2564,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2798,8 +2783,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3090,8 +3074,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3369,8 +3352,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3622,8 +3604,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3836,8 +3817,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4061,8 +4041,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4258,8 +4237,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4526,8 +4504,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -4808,8 +4785,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5323,8 +5299,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6080,8 +6055,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -6263,8 +6237,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6428,8 +6401,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6585,8 +6557,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6748,8 +6719,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -6921,8 +6891,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -7088,8 +7057,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -7265,8 +7233,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -7446,8 +7413,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -7626,8 +7592,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -7824,8 +7789,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8020,8 +7984,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8244,8 +8207,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8485,8 +8447,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8674,8 +8635,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -8829,8 +8789,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9017,8 +8976,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9172,8 +9130,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9331,8 +9288,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9564,8 +9520,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9705,8 +9660,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -9854,8 +9808,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
index 148f82b..2bb7730 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_expressions.q.out
@@ -102,8 +102,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -296,8 +295,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -438,7 +436,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: t (type: tinyint), bo (type: boolean), s (type: string), si (type: smallint), f (type: float)
sort order: ++++-
@@ -457,8 +455,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -466,7 +463,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [0, 1, 4, 6, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -662,7 +659,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: si (type: smallint), i (type: int), s (type: string)
sort order: +++
@@ -681,8 +678,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -690,7 +686,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -886,7 +882,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 204 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: b (type: bigint), si (type: smallint), s (type: string), d (type: double)
sort order: ++++
@@ -905,8 +901,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -914,7 +909,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 3, 5, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1110,7 +1105,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: f (type: float), b (type: bigint)
sort order: ++
@@ -1130,8 +1125,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1139,7 +1133,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3, 4, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1354,8 +1348,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1533,8 +1526,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1680,7 +1672,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 228 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: ts (type: timestamp), i (type: int)
sort order: ++
@@ -1700,8 +1692,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1709,7 +1700,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 7, 8]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -1944,8 +1935,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_gby.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_gby.q.out
index d87e96f..993ea61 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_gby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_gby.q.out
@@ -70,8 +70,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -122,8 +121,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out
index 8dcb900..493d404 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_gby2.q.out
@@ -74,8 +74,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -317,8 +316,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -554,8 +552,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -913,8 +910,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -965,8 +961,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_windowing_multipartitioning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_multipartitioning.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_multipartitioning.q.out
index 01bcb69..1a06f08 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_multipartitioning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_multipartitioning.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Reduce Output Operator
key expressions: s (type: string), si (type: smallint)
sort order: ++
@@ -94,8 +94,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -103,7 +102,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 3, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -10242,7 +10241,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 344 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -10269,8 +10268,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10278,7 +10276,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [3, 7, 8, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -10520,7 +10518,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -10546,8 +10544,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10555,7 +10552,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 2, 4, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -10787,7 +10784,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -10814,8 +10811,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -10823,7 +10819,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 6, 7, 10]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -11060,7 +11056,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -11086,8 +11082,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11095,7 +11090,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [2, 4, 7]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -11340,7 +11335,7 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 304 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2), 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:tinyint, 1:si:smallint, 2:i:int, 3:b:bigint, 4:f:float, 5:d:double, 6:bo:boolean, 7:s:string, 8:ts:timestamp, 9:dec:decimal(4,2)/DECIMAL_64, 10:bin:binary, 11:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
@@ -11367,8 +11362,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -11376,7 +11370,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 11
includeColumns: [1, 4, 7, 9]
- dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2), bin:binary
+ dataColumns: t:tinyint, si:smallint, i:int, b:bigint, f:float, d:double, bo:boolean, s:string, ts:timestamp, dec:decimal(4,2)/DECIMAL_64, bin:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
[61/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 5c6495e..68e34d5 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -185,6 +185,16 @@ struct SchemaVersionState {
extern const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES;
+struct IsolationLevelCompliance {
+ enum type {
+ YES = 1,
+ NO = 2,
+ UNKNOWN = 3
+ };
+};
+
+extern const std::map<int, const char*> _IsolationLevelCompliance_VALUES_TO_NAMES;
+
struct FunctionType {
enum type {
JAVA = 1
@@ -667,6 +677,10 @@ class RuntimeStat;
class GetRuntimeStatsRequest;
+class AlterPartitionsRequest;
+
+class AlterPartitionsResponse;
+
class MetaException;
class UnknownTableException;
@@ -3101,7 +3115,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj)
}
typedef struct _Table__isset {
- _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true) {}
+ _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true), txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
bool tableName :1;
bool dbName :1;
bool owner :1;
@@ -3120,6 +3134,9 @@ typedef struct _Table__isset {
bool creationMetadata :1;
bool catName :1;
bool ownerType :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
} _Table__isset;
class Table {
@@ -3127,7 +3144,7 @@ class Table {
Table(const Table&);
Table& operator=(const Table&);
- Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1) {
+ Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName(), ownerType((PrincipalType::type)1), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
ownerType = (PrincipalType::type)1;
}
@@ -3151,6 +3168,9 @@ class Table {
CreationMetadata creationMetadata;
std::string catName;
PrincipalType::type ownerType;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
_Table__isset __isset;
@@ -3190,6 +3210,12 @@ class Table {
void __set_ownerType(const PrincipalType::type val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const Table & rhs) const
{
if (!(tableName == rhs.tableName))
@@ -3240,6 +3266,18 @@ class Table {
return false;
else if (__isset.ownerType && !(ownerType == rhs.ownerType))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const Table &rhs) const {
@@ -3263,7 +3301,7 @@ inline std::ostream& operator<<(std::ostream& out, const Table& obj)
}
typedef struct _Partition__isset {
- _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false) {}
+ _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
bool values :1;
bool dbName :1;
bool tableName :1;
@@ -3273,6 +3311,9 @@ typedef struct _Partition__isset {
bool parameters :1;
bool privileges :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
} _Partition__isset;
class Partition {
@@ -3280,7 +3321,7 @@ class Partition {
Partition(const Partition&);
Partition& operator=(const Partition&);
- Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName() {
+ Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~Partition() throw();
@@ -3293,6 +3334,9 @@ class Partition {
std::map<std::string, std::string> parameters;
PrincipalPrivilegeSet privileges;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
_Partition__isset __isset;
@@ -3314,6 +3358,12 @@ class Partition {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const Partition & rhs) const
{
if (!(values == rhs.values))
@@ -3338,6 +3388,18 @@ class Partition {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const Partition &rhs) const {
@@ -3537,13 +3599,16 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionListComposingS
}
typedef struct _PartitionSpec__isset {
- _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false) {}
+ _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false), txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
bool dbName :1;
bool tableName :1;
bool rootPath :1;
bool sharedSDPartitionSpec :1;
bool partitionList :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
} _PartitionSpec__isset;
class PartitionSpec {
@@ -3551,7 +3616,7 @@ class PartitionSpec {
PartitionSpec(const PartitionSpec&);
PartitionSpec& operator=(const PartitionSpec&);
- PartitionSpec() : dbName(), tableName(), rootPath(), catName() {
+ PartitionSpec() : dbName(), tableName(), rootPath(), catName(), txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~PartitionSpec() throw();
@@ -3561,6 +3626,9 @@ class PartitionSpec {
PartitionSpecWithSharedSD sharedSDPartitionSpec;
PartitionListComposingSpec partitionList;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
_PartitionSpec__isset __isset;
@@ -3576,6 +3644,12 @@ class PartitionSpec {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const PartitionSpec & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -3596,6 +3670,18 @@ class PartitionSpec {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const PartitionSpec &rhs) const {
@@ -4404,29 +4490,58 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatisticsDesc& o
return out;
}
+typedef struct _ColumnStatistics__isset {
+ _ColumnStatistics__isset() : txnId(true), validWriteIdList(false), isStatsCompliant(false) {}
+ bool txnId :1;
+ bool validWriteIdList :1;
+ bool isStatsCompliant :1;
+} _ColumnStatistics__isset;
class ColumnStatistics {
public:
ColumnStatistics(const ColumnStatistics&);
ColumnStatistics& operator=(const ColumnStatistics&);
- ColumnStatistics() {
+ ColumnStatistics() : txnId(-1LL), validWriteIdList(), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~ColumnStatistics() throw();
ColumnStatisticsDesc statsDesc;
std::vector<ColumnStatisticsObj> statsObj;
+ int64_t txnId;
+ std::string validWriteIdList;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _ColumnStatistics__isset __isset;
void __set_statsDesc(const ColumnStatisticsDesc& val);
void __set_statsObj(const std::vector<ColumnStatisticsObj> & val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const ColumnStatistics & rhs) const
{
if (!(statsDesc == rhs.statsDesc))
return false;
if (!(statsObj == rhs.statsObj))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const ColumnStatistics &rhs) const {
@@ -4449,29 +4564,42 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatistics& obj)
return out;
}
+typedef struct _AggrStats__isset {
+ _AggrStats__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _AggrStats__isset;
class AggrStats {
public:
AggrStats(const AggrStats&);
AggrStats& operator=(const AggrStats&);
- AggrStats() : partsFound(0) {
+ AggrStats() : partsFound(0), isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~AggrStats() throw();
std::vector<ColumnStatisticsObj> colStats;
int64_t partsFound;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _AggrStats__isset __isset;
void __set_colStats(const std::vector<ColumnStatisticsObj> & val);
void __set_partsFound(const int64_t val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const AggrStats & rhs) const
{
if (!(colStats == rhs.colStats))
return false;
if (!(partsFound == rhs.partsFound))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const AggrStats &rhs) const {
@@ -4495,8 +4623,10 @@ inline std::ostream& operator<<(std::ostream& out, const AggrStats& obj)
}
typedef struct _SetPartitionsStatsRequest__isset {
- _SetPartitionsStatsRequest__isset() : needMerge(false) {}
+ _SetPartitionsStatsRequest__isset() : needMerge(false), txnId(true), validWriteIdList(false) {}
bool needMerge :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _SetPartitionsStatsRequest__isset;
class SetPartitionsStatsRequest {
@@ -4504,12 +4634,14 @@ class SetPartitionsStatsRequest {
SetPartitionsStatsRequest(const SetPartitionsStatsRequest&);
SetPartitionsStatsRequest& operator=(const SetPartitionsStatsRequest&);
- SetPartitionsStatsRequest() : needMerge(0) {
+ SetPartitionsStatsRequest() : needMerge(0), txnId(-1LL), validWriteIdList() {
}
virtual ~SetPartitionsStatsRequest() throw();
std::vector<ColumnStatistics> colStats;
bool needMerge;
+ int64_t txnId;
+ std::string validWriteIdList;
_SetPartitionsStatsRequest__isset __isset;
@@ -4517,6 +4649,10 @@ class SetPartitionsStatsRequest {
void __set_needMerge(const bool val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const SetPartitionsStatsRequest & rhs) const
{
if (!(colStats == rhs.colStats))
@@ -4525,6 +4661,14 @@ class SetPartitionsStatsRequest {
return false;
else if (__isset.needMerge && !(needMerge == rhs.needMerge))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const SetPartitionsStatsRequest &rhs) const {
@@ -5642,24 +5786,37 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsByExprRequest
return out;
}
+typedef struct _TableStatsResult__isset {
+ _TableStatsResult__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _TableStatsResult__isset;
class TableStatsResult {
public:
TableStatsResult(const TableStatsResult&);
TableStatsResult& operator=(const TableStatsResult&);
- TableStatsResult() {
+ TableStatsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~TableStatsResult() throw();
std::vector<ColumnStatisticsObj> tableStats;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _TableStatsResult__isset __isset;
void __set_tableStats(const std::vector<ColumnStatisticsObj> & val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const TableStatsResult & rhs) const
{
if (!(tableStats == rhs.tableStats))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const TableStatsResult &rhs) const {
@@ -5682,24 +5839,37 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsResult& obj)
return out;
}
+typedef struct _PartitionsStatsResult__isset {
+ _PartitionsStatsResult__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _PartitionsStatsResult__isset;
class PartitionsStatsResult {
public:
PartitionsStatsResult(const PartitionsStatsResult&);
PartitionsStatsResult& operator=(const PartitionsStatsResult&);
- PartitionsStatsResult() {
+ PartitionsStatsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~PartitionsStatsResult() throw();
std::map<std::string, std::vector<ColumnStatisticsObj> > partStats;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _PartitionsStatsResult__isset __isset;
void __set_partStats(const std::map<std::string, std::vector<ColumnStatisticsObj> > & val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const PartitionsStatsResult & rhs) const
{
if (!(partStats == rhs.partStats))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const PartitionsStatsResult &rhs) const {
@@ -5723,8 +5893,10 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsResult&
}
typedef struct _TableStatsRequest__isset {
- _TableStatsRequest__isset() : catName(false) {}
+ _TableStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {}
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _TableStatsRequest__isset;
class TableStatsRequest {
@@ -5732,7 +5904,7 @@ class TableStatsRequest {
TableStatsRequest(const TableStatsRequest&);
TableStatsRequest& operator=(const TableStatsRequest&);
- TableStatsRequest() : dbName(), tblName(), catName() {
+ TableStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~TableStatsRequest() throw();
@@ -5740,6 +5912,8 @@ class TableStatsRequest {
std::string tblName;
std::vector<std::string> colNames;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_TableStatsRequest__isset __isset;
@@ -5751,6 +5925,10 @@ class TableStatsRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const TableStatsRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -5763,6 +5941,14 @@ class TableStatsRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const TableStatsRequest &rhs) const {
@@ -5786,8 +5972,10 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj)
}
typedef struct _PartitionsStatsRequest__isset {
- _PartitionsStatsRequest__isset() : catName(false) {}
+ _PartitionsStatsRequest__isset() : catName(false), txnId(true), validWriteIdList(false) {}
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _PartitionsStatsRequest__isset;
class PartitionsStatsRequest {
@@ -5795,7 +5983,7 @@ class PartitionsStatsRequest {
PartitionsStatsRequest(const PartitionsStatsRequest&);
PartitionsStatsRequest& operator=(const PartitionsStatsRequest&);
- PartitionsStatsRequest() : dbName(), tblName(), catName() {
+ PartitionsStatsRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~PartitionsStatsRequest() throw();
@@ -5804,6 +5992,8 @@ class PartitionsStatsRequest {
std::vector<std::string> colNames;
std::vector<std::string> partNames;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_PartitionsStatsRequest__isset __isset;
@@ -5817,6 +6007,10 @@ class PartitionsStatsRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const PartitionsStatsRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -5831,6 +6025,14 @@ class PartitionsStatsRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const PartitionsStatsRequest &rhs) const {
@@ -5854,8 +6056,9 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsRequest&
}
typedef struct _AddPartitionsResult__isset {
- _AddPartitionsResult__isset() : partitions(false) {}
+ _AddPartitionsResult__isset() : partitions(false), isStatsCompliant(false) {}
bool partitions :1;
+ bool isStatsCompliant :1;
} _AddPartitionsResult__isset;
class AddPartitionsResult {
@@ -5863,22 +6066,29 @@ class AddPartitionsResult {
AddPartitionsResult(const AddPartitionsResult&);
AddPartitionsResult& operator=(const AddPartitionsResult&);
- AddPartitionsResult() {
+ AddPartitionsResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~AddPartitionsResult() throw();
std::vector<Partition> partitions;
+ IsolationLevelCompliance::type isStatsCompliant;
_AddPartitionsResult__isset __isset;
void __set_partitions(const std::vector<Partition> & val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const AddPartitionsResult & rhs) const
{
if (__isset.partitions != rhs.__isset.partitions)
return false;
else if (__isset.partitions && !(partitions == rhs.partitions))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const AddPartitionsResult &rhs) const {
@@ -5902,9 +6112,11 @@ inline std::ostream& operator<<(std::ostream& out, const AddPartitionsResult& ob
}
typedef struct _AddPartitionsRequest__isset {
- _AddPartitionsRequest__isset() : needResult(true), catName(false) {}
+ _AddPartitionsRequest__isset() : needResult(true), catName(false), txnId(true), validWriteIdList(false) {}
bool needResult :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _AddPartitionsRequest__isset;
class AddPartitionsRequest {
@@ -5912,7 +6124,7 @@ class AddPartitionsRequest {
AddPartitionsRequest(const AddPartitionsRequest&);
AddPartitionsRequest& operator=(const AddPartitionsRequest&);
- AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName() {
+ AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~AddPartitionsRequest() throw();
@@ -5922,6 +6134,8 @@ class AddPartitionsRequest {
bool ifNotExists;
bool needResult;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_AddPartitionsRequest__isset __isset;
@@ -5937,6 +6151,10 @@ class AddPartitionsRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const AddPartitionsRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -5955,6 +6173,14 @@ class AddPartitionsRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const AddPartitionsRequest &rhs) const {
@@ -9873,9 +10099,11 @@ inline std::ostream& operator<<(std::ostream& out, const ClientCapabilities& obj
}
typedef struct _GetTableRequest__isset {
- _GetTableRequest__isset() : capabilities(false), catName(false) {}
+ _GetTableRequest__isset() : capabilities(false), catName(false), txnId(true), validWriteIdList(false) {}
bool capabilities :1;
bool catName :1;
+ bool txnId :1;
+ bool validWriteIdList :1;
} _GetTableRequest__isset;
class GetTableRequest {
@@ -9883,7 +10111,7 @@ class GetTableRequest {
GetTableRequest(const GetTableRequest&);
GetTableRequest& operator=(const GetTableRequest&);
- GetTableRequest() : dbName(), tblName(), catName() {
+ GetTableRequest() : dbName(), tblName(), catName(), txnId(-1LL), validWriteIdList() {
}
virtual ~GetTableRequest() throw();
@@ -9891,6 +10119,8 @@ class GetTableRequest {
std::string tblName;
ClientCapabilities capabilities;
std::string catName;
+ int64_t txnId;
+ std::string validWriteIdList;
_GetTableRequest__isset __isset;
@@ -9902,6 +10132,10 @@ class GetTableRequest {
void __set_catName(const std::string& val);
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
bool operator == (const GetTableRequest & rhs) const
{
if (!(dbName == rhs.dbName))
@@ -9916,6 +10150,14 @@ class GetTableRequest {
return false;
else if (__isset.catName && !(catName == rhs.catName))
return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
return true;
}
bool operator != (const GetTableRequest &rhs) const {
@@ -9938,24 +10180,37 @@ inline std::ostream& operator<<(std::ostream& out, const GetTableRequest& obj)
return out;
}
+typedef struct _GetTableResult__isset {
+ _GetTableResult__isset() : isStatsCompliant(false) {}
+ bool isStatsCompliant :1;
+} _GetTableResult__isset;
class GetTableResult {
public:
GetTableResult(const GetTableResult&);
GetTableResult& operator=(const GetTableResult&);
- GetTableResult() {
+ GetTableResult() : isStatsCompliant((IsolationLevelCompliance::type)0) {
}
virtual ~GetTableResult() throw();
Table table;
+ IsolationLevelCompliance::type isStatsCompliant;
+
+ _GetTableResult__isset __isset;
void __set_table(const Table& val);
+ void __set_isStatsCompliant(const IsolationLevelCompliance::type val);
+
bool operator == (const GetTableResult & rhs) const
{
if (!(table == rhs.table))
return false;
+ if (__isset.isStatsCompliant != rhs.__isset.isStatsCompliant)
+ return false;
+ else if (__isset.isStatsCompliant && !(isStatsCompliant == rhs.isStatsCompliant))
+ return false;
return true;
}
bool operator != (const GetTableResult &rhs) const {
@@ -13186,6 +13441,117 @@ inline std::ostream& operator<<(std::ostream& out, const GetRuntimeStatsRequest&
return out;
}
+typedef struct _AlterPartitionsRequest__isset {
+ _AlterPartitionsRequest__isset() : txnId(true), validWriteIdList(false) {}
+ bool txnId :1;
+ bool validWriteIdList :1;
+} _AlterPartitionsRequest__isset;
+
+class AlterPartitionsRequest {
+ public:
+
+ AlterPartitionsRequest(const AlterPartitionsRequest&);
+ AlterPartitionsRequest& operator=(const AlterPartitionsRequest&);
+ AlterPartitionsRequest() : dbName(), tableName(), txnId(-1LL), validWriteIdList() {
+ }
+
+ virtual ~AlterPartitionsRequest() throw();
+ std::string dbName;
+ std::string tableName;
+ std::vector<Partition> partitions;
+ EnvironmentContext environmentContext;
+ int64_t txnId;
+ std::string validWriteIdList;
+
+ _AlterPartitionsRequest__isset __isset;
+
+ void __set_dbName(const std::string& val);
+
+ void __set_tableName(const std::string& val);
+
+ void __set_partitions(const std::vector<Partition> & val);
+
+ void __set_environmentContext(const EnvironmentContext& val);
+
+ void __set_txnId(const int64_t val);
+
+ void __set_validWriteIdList(const std::string& val);
+
+ bool operator == (const AlterPartitionsRequest & rhs) const
+ {
+ if (!(dbName == rhs.dbName))
+ return false;
+ if (!(tableName == rhs.tableName))
+ return false;
+ if (!(partitions == rhs.partitions))
+ return false;
+ if (!(environmentContext == rhs.environmentContext))
+ return false;
+ if (__isset.txnId != rhs.__isset.txnId)
+ return false;
+ else if (__isset.txnId && !(txnId == rhs.txnId))
+ return false;
+ if (__isset.validWriteIdList != rhs.__isset.validWriteIdList)
+ return false;
+ else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList))
+ return false;
+ return true;
+ }
+ bool operator != (const AlterPartitionsRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const AlterPartitionsRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AlterPartitionsRequest &a, AlterPartitionsRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AlterPartitionsRequest& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
+
+class AlterPartitionsResponse {
+ public:
+
+ AlterPartitionsResponse(const AlterPartitionsResponse&);
+ AlterPartitionsResponse& operator=(const AlterPartitionsResponse&);
+ AlterPartitionsResponse() {
+ }
+
+ virtual ~AlterPartitionsResponse() throw();
+
+ bool operator == (const AlterPartitionsResponse & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const AlterPartitionsResponse &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const AlterPartitionsResponse & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+ virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AlterPartitionsResponse &a, AlterPartitionsResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AlterPartitionsResponse& obj)
+{
+ obj.printTo(out);
+ return out;
+}
+
typedef struct _MetaException__isset {
_MetaException__isset() : message(false) {}
bool message :1;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
index dd3a127..56e5043 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
@@ -44,6 +44,8 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4);
private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5);
private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+ private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7);
+ private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -57,6 +59,8 @@ import org.slf4j.LoggerFactory;
private boolean ifNotExists; // required
private boolean needResult; // optional
private String catName; // optional
+ private long txnId; // optional
+ private String validWriteIdList; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -65,7 +69,9 @@ import org.slf4j.LoggerFactory;
PARTS((short)3, "parts"),
IF_NOT_EXISTS((short)4, "ifNotExists"),
NEED_RESULT((short)5, "needResult"),
- CAT_NAME((short)6, "catName");
+ CAT_NAME((short)6, "catName"),
+ TXN_ID((short)7, "txnId"),
+ VALID_WRITE_ID_LIST((short)8, "validWriteIdList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -92,6 +98,10 @@ import org.slf4j.LoggerFactory;
return NEED_RESULT;
case 6: // CAT_NAME
return CAT_NAME;
+ case 7: // TXN_ID
+ return TXN_ID;
+ case 8: // VALID_WRITE_ID_LIST
+ return VALID_WRITE_ID_LIST;
default:
return null;
}
@@ -134,8 +144,9 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __IFNOTEXISTS_ISSET_ID = 0;
private static final int __NEEDRESULT_ISSET_ID = 1;
+ private static final int __TXNID_ISSET_ID = 2;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME};
+ private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -152,6 +163,10 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+ tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap);
}
@@ -159,6 +174,8 @@ import org.slf4j.LoggerFactory;
public AddPartitionsRequest() {
this.needResult = true;
+ this.txnId = -1L;
+
}
public AddPartitionsRequest(
@@ -198,6 +215,10 @@ import org.slf4j.LoggerFactory;
if (other.isSetCatName()) {
this.catName = other.catName;
}
+ this.txnId = other.txnId;
+ if (other.isSetValidWriteIdList()) {
+ this.validWriteIdList = other.validWriteIdList;
+ }
}
public AddPartitionsRequest deepCopy() {
@@ -214,6 +235,9 @@ import org.slf4j.LoggerFactory;
this.needResult = true;
this.catName = null;
+ this.txnId = -1L;
+
+ this.validWriteIdList = null;
}
public String getDbName() {
@@ -367,6 +391,51 @@ import org.slf4j.LoggerFactory;
}
}
+ public long getTxnId() {
+ return this.txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ setTxnIdIsSet(true);
+ }
+
+ public void unsetTxnId() {
+ __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
+ public boolean isSetTxnId() {
+ return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+ }
+
+ public void setTxnIdIsSet(boolean value) {
+ __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+ }
+
+ public String getValidWriteIdList() {
+ return this.validWriteIdList;
+ }
+
+ public void setValidWriteIdList(String validWriteIdList) {
+ this.validWriteIdList = validWriteIdList;
+ }
+
+ public void unsetValidWriteIdList() {
+ this.validWriteIdList = null;
+ }
+
+ /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
+ public boolean isSetValidWriteIdList() {
+ return this.validWriteIdList != null;
+ }
+
+ public void setValidWriteIdListIsSet(boolean value) {
+ if (!value) {
+ this.validWriteIdList = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case DB_NAME:
@@ -417,6 +486,22 @@ import org.slf4j.LoggerFactory;
}
break;
+ case TXN_ID:
+ if (value == null) {
+ unsetTxnId();
+ } else {
+ setTxnId((Long)value);
+ }
+ break;
+
+ case VALID_WRITE_ID_LIST:
+ if (value == null) {
+ unsetValidWriteIdList();
+ } else {
+ setValidWriteIdList((String)value);
+ }
+ break;
+
}
}
@@ -440,6 +525,12 @@ import org.slf4j.LoggerFactory;
case CAT_NAME:
return getCatName();
+ case TXN_ID:
+ return getTxnId();
+
+ case VALID_WRITE_ID_LIST:
+ return getValidWriteIdList();
+
}
throw new IllegalStateException();
}
@@ -463,6 +554,10 @@ import org.slf4j.LoggerFactory;
return isSetNeedResult();
case CAT_NAME:
return isSetCatName();
+ case TXN_ID:
+ return isSetTxnId();
+ case VALID_WRITE_ID_LIST:
+ return isSetValidWriteIdList();
}
throw new IllegalStateException();
}
@@ -534,6 +629,24 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_txnId = true && this.isSetTxnId();
+ boolean that_present_txnId = true && that.isSetTxnId();
+ if (this_present_txnId || that_present_txnId) {
+ if (!(this_present_txnId && that_present_txnId))
+ return false;
+ if (this.txnId != that.txnId)
+ return false;
+ }
+
+ boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
+ boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
+ if (this_present_validWriteIdList || that_present_validWriteIdList) {
+ if (!(this_present_validWriteIdList && that_present_validWriteIdList))
+ return false;
+ if (!this.validWriteIdList.equals(that.validWriteIdList))
+ return false;
+ }
+
return true;
}
@@ -571,6 +684,16 @@ import org.slf4j.LoggerFactory;
if (present_catName)
list.add(catName);
+ boolean present_txnId = true && (isSetTxnId());
+ list.add(present_txnId);
+ if (present_txnId)
+ list.add(txnId);
+
+ boolean present_validWriteIdList = true && (isSetValidWriteIdList());
+ list.add(present_validWriteIdList);
+ if (present_validWriteIdList)
+ list.add(validWriteIdList);
+
return list.hashCode();
}
@@ -642,6 +765,26 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetTxnId()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetValidWriteIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -705,6 +848,22 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetTxnId()) {
+ if (!first) sb.append(", ");
+ sb.append("txnId:");
+ sb.append(this.txnId);
+ first = false;
+ }
+ if (isSetValidWriteIdList()) {
+ if (!first) sb.append(", ");
+ sb.append("validWriteIdList:");
+ if (this.validWriteIdList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.validWriteIdList);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -825,6 +984,22 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 7: // TXN_ID
+ if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
+ case 8: // VALID_WRITE_ID_LIST
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -875,6 +1050,18 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isSetTxnId()) {
+ oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+ oprot.writeI64(struct.txnId);
+ oprot.writeFieldEnd();
+ }
+ if (struct.validWriteIdList != null) {
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
+ oprot.writeString(struct.validWriteIdList);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -909,13 +1096,25 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCatName()) {
optionals.set(1);
}
- oprot.writeBitSet(optionals, 2);
+ if (struct.isSetTxnId()) {
+ optionals.set(2);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ optionals.set(3);
+ }
+ oprot.writeBitSet(optionals, 4);
if (struct.isSetNeedResult()) {
oprot.writeBool(struct.needResult);
}
if (struct.isSetCatName()) {
oprot.writeString(struct.catName);
}
+ if (struct.isSetTxnId()) {
+ oprot.writeI64(struct.txnId);
+ }
+ if (struct.isSetValidWriteIdList()) {
+ oprot.writeString(struct.validWriteIdList);
+ }
}
@Override
@@ -939,7 +1138,7 @@ import org.slf4j.LoggerFactory;
struct.setPartsIsSet(true);
struct.ifNotExists = iprot.readBool();
struct.setIfNotExistsIsSet(true);
- BitSet incoming = iprot.readBitSet(2);
+ BitSet incoming = iprot.readBitSet(4);
if (incoming.get(0)) {
struct.needResult = iprot.readBool();
struct.setNeedResultIsSet(true);
@@ -948,6 +1147,14 @@ import org.slf4j.LoggerFactory;
struct.catName = iprot.readString();
struct.setCatNameIsSet(true);
}
+ if (incoming.get(2)) {
+ struct.txnId = iprot.readI64();
+ struct.setTxnIdIsSet(true);
+ }
+ if (incoming.get(3)) {
+ struct.validWriteIdList = iprot.readString();
+ struct.setValidWriteIdListIsSet(true);
+ }
}
}
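
The hunk above only adds plumbing for two optional fields, so a client that never sets them is unaffected. A minimal sketch of how a caller might populate them, assuming the standard Thrift-generated setters for the pre-existing required fields (setDbName, setTblName, setParts, setIfNotExists) that are not shown in this hunk, and using placeholder database, table, transaction, and write-id values:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.Partition;

public class AddPartitionsTxnExample {
  public static void main(String[] args) {
    // Build the request as before; the required fields are unchanged.
    AddPartitionsRequest req = new AddPartitionsRequest();
    req.setDbName("default");                  // placeholder database name
    req.setTblName("acid_tbl");                // placeholder table name
    List<Partition> parts = new ArrayList<Partition>();
    req.setParts(parts);                       // partitions to add, elided here
    req.setIfNotExists(true);

    // New optional fields from this change; txnId defaults to -1 (no transaction).
    req.setTxnId(42L);                                                    // placeholder open txn id
    req.setValidWriteIdList("default.acid_tbl:5:9223372036854775807::"); // placeholder snapshot string

    System.out.println("txnId set: " + req.isSetTxnId() + " -> " + req.getTxnId());
    System.out.println("validWriteIdList: " + req.getValidWriteIdList());
  }
}

Because both fields are optional, a reader using the standard scheme that does not know field ids 7 and 8 simply falls through to the skip branch shown above, so requests from newer clients remain readable by older servers.
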
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
index fe41b8c..03d1fc4 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
@@ -39,6 +39,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult");
private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)2);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -47,10 +48,16 @@ import org.slf4j.LoggerFactory;
}
private List<Partition> partitions; // optional
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- PARTITIONS((short)1, "partitions");
+ PARTITIONS((short)1, "partitions"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -67,6 +74,8 @@ import org.slf4j.LoggerFactory;
switch(fieldId) {
case 1: // PARTITIONS
return PARTITIONS;
+ case 2: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -107,13 +116,15 @@ import org.slf4j.LoggerFactory;
}
// isset id assignments
- private static final _Fields optionals[] = {_Fields.PARTITIONS};
+ private static final _Fields optionals[] = {_Fields.PARTITIONS,_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.OPTIONAL,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsResult.class, metaDataMap);
}
@@ -132,6 +143,9 @@ import org.slf4j.LoggerFactory;
}
this.partitions = __this__partitions;
}
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public AddPartitionsResult deepCopy() {
@@ -141,6 +155,7 @@ import org.slf4j.LoggerFactory;
@Override
public void clear() {
this.partitions = null;
+ this.isStatsCompliant = null;
}
public int getPartitionsSize() {
@@ -181,6 +196,37 @@ import org.slf4j.LoggerFactory;
}
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case PARTITIONS:
@@ -191,6 +237,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -199,6 +253,9 @@ import org.slf4j.LoggerFactory;
case PARTITIONS:
return getPartitions();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -212,6 +269,8 @@ import org.slf4j.LoggerFactory;
switch (field) {
case PARTITIONS:
return isSetPartitions();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -238,6 +297,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -250,6 +318,11 @@ import org.slf4j.LoggerFactory;
if (present_partitions)
list.add(partitions);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -271,6 +344,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -300,6 +383,16 @@ import org.slf4j.LoggerFactory;
}
first = false;
}
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -362,6 +455,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 2: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -389,6 +490,13 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldEnd();
}
}
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -410,7 +518,10 @@ import org.slf4j.LoggerFactory;
if (struct.isSetPartitions()) {
optionals.set(0);
}
- oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(1);
+ }
+ oprot.writeBitSet(optionals, 2);
if (struct.isSetPartitions()) {
{
oprot.writeI32(struct.partitions.size());
@@ -420,12 +531,15 @@ import org.slf4j.LoggerFactory;
}
}
}
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(1);
+ BitSet incoming = iprot.readBitSet(2);
if (incoming.get(0)) {
{
org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
@@ -440,6 +554,10 @@ import org.slf4j.LoggerFactory;
}
struct.setPartitionsIsSet(true);
}
+ if (incoming.get(1)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
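
AddPartitionsResult gains a single optional enum; older servers never send it, so callers should branch on isSetIsStatsCompliant() before trusting the value. A minimal sketch under that assumption, using only the accessors introduced in the hunk above:

import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;

public class StatsComplianceCheck {
  /** Only read the enum when the server actually populated it. */
  static String describe(AddPartitionsResult result) {
    if (!result.isSetIsStatsCompliant()) {
      return "isStatsCompliant not reported (older server, or stats not tracked)";
    }
    IsolationLevelCompliance compliance = result.getIsStatsCompliant();
    return "isStatsCompliant = " + compliance;
  }

  public static void main(String[] args) {
    // A result with the field left unset, as an older metastore would return it.
    AddPartitionsResult legacy = new AddPartitionsResult();
    System.out.println(describe(legacy));
  }
}
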
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
index fff212d..fea95c3 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
@@ -40,6 +40,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
private static final org.apache.thrift.protocol.TField PARTS_FOUND_FIELD_DESC = new org.apache.thrift.protocol.TField("partsFound", org.apache.thrift.protocol.TType.I64, (short)2);
+ private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.I32, (short)3);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -49,11 +50,17 @@ import org.slf4j.LoggerFactory;
private List<ColumnStatisticsObj> colStats; // required
private long partsFound; // required
+ private IsolationLevelCompliance isStatsCompliant; // optional
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
COL_STATS((short)1, "colStats"),
- PARTS_FOUND((short)2, "partsFound");
+ PARTS_FOUND((short)2, "partsFound"),
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ IS_STATS_COMPLIANT((short)3, "isStatsCompliant");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -72,6 +79,8 @@ import org.slf4j.LoggerFactory;
return COL_STATS;
case 2: // PARTS_FOUND
return PARTS_FOUND;
+ case 3: // IS_STATS_COMPLIANT
+ return IS_STATS_COMPLIANT;
default:
return null;
}
@@ -114,6 +123,7 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __PARTSFOUND_ISSET_ID = 0;
private byte __isset_bitfield = 0;
+ private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -122,6 +132,8 @@ import org.slf4j.LoggerFactory;
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
tmpMap.put(_Fields.PARTS_FOUND, new org.apache.thrift.meta_data.FieldMetaData("partsFound", org.apache.thrift.TFieldRequirementType.REQUIRED,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+ tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, IsolationLevelCompliance.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggrStats.class, metaDataMap);
}
@@ -152,6 +164,9 @@ import org.slf4j.LoggerFactory;
this.colStats = __this__colStats;
}
this.partsFound = other.partsFound;
+ if (other.isSetIsStatsCompliant()) {
+ this.isStatsCompliant = other.isStatsCompliant;
+ }
}
public AggrStats deepCopy() {
@@ -163,6 +178,7 @@ import org.slf4j.LoggerFactory;
this.colStats = null;
setPartsFoundIsSet(false);
this.partsFound = 0;
+ this.isStatsCompliant = null;
}
public int getColStatsSize() {
@@ -225,6 +241,37 @@ import org.slf4j.LoggerFactory;
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTSFOUND_ISSET_ID, value);
}
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public IsolationLevelCompliance getIsStatsCompliant() {
+ return this.isStatsCompliant;
+ }
+
+ /**
+ *
+ * @see IsolationLevelCompliance
+ */
+ public void setIsStatsCompliant(IsolationLevelCompliance isStatsCompliant) {
+ this.isStatsCompliant = isStatsCompliant;
+ }
+
+ public void unsetIsStatsCompliant() {
+ this.isStatsCompliant = null;
+ }
+
+ /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
+ public boolean isSetIsStatsCompliant() {
+ return this.isStatsCompliant != null;
+ }
+
+ public void setIsStatsCompliantIsSet(boolean value) {
+ if (!value) {
+ this.isStatsCompliant = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case COL_STATS:
@@ -243,6 +290,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case IS_STATS_COMPLIANT:
+ if (value == null) {
+ unsetIsStatsCompliant();
+ } else {
+ setIsStatsCompliant((IsolationLevelCompliance)value);
+ }
+ break;
+
}
}
@@ -254,6 +309,9 @@ import org.slf4j.LoggerFactory;
case PARTS_FOUND:
return getPartsFound();
+ case IS_STATS_COMPLIANT:
+ return getIsStatsCompliant();
+
}
throw new IllegalStateException();
}
@@ -269,6 +327,8 @@ import org.slf4j.LoggerFactory;
return isSetColStats();
case PARTS_FOUND:
return isSetPartsFound();
+ case IS_STATS_COMPLIANT:
+ return isSetIsStatsCompliant();
}
throw new IllegalStateException();
}
@@ -304,6 +364,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
+ boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
+ if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
+ if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
+ return false;
+ if (!this.isStatsCompliant.equals(that.isStatsCompliant))
+ return false;
+ }
+
return true;
}
@@ -321,6 +390,11 @@ import org.slf4j.LoggerFactory;
if (present_partsFound)
list.add(partsFound);
+ boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
+ list.add(present_isStatsCompliant);
+ if (present_isStatsCompliant)
+ list.add(isStatsCompliant.getValue());
+
return list.hashCode();
}
@@ -352,6 +426,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetIsStatsCompliant()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -383,6 +467,16 @@ import org.slf4j.LoggerFactory;
sb.append("partsFound:");
sb.append(this.partsFound);
first = false;
+ if (isSetIsStatsCompliant()) {
+ if (!first) sb.append(", ");
+ sb.append("isStatsCompliant:");
+ if (this.isStatsCompliant == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.isStatsCompliant);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -463,6 +557,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
+ case 3: // IS_STATS_COMPLIANT
+ if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
@@ -491,6 +593,13 @@ import org.slf4j.LoggerFactory;
oprot.writeFieldBegin(PARTS_FOUND_FIELD_DESC);
oprot.writeI64(struct.partsFound);
oprot.writeFieldEnd();
+ if (struct.isStatsCompliant != null) {
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -516,6 +625,14 @@ import org.slf4j.LoggerFactory;
}
}
oprot.writeI64(struct.partsFound);
+ BitSet optionals = new BitSet();
+ if (struct.isSetIsStatsCompliant()) {
+ optionals.set(0);
+ }
+ oprot.writeBitSet(optionals, 1);
+ if (struct.isSetIsStatsCompliant()) {
+ oprot.writeI32(struct.isStatsCompliant.getValue());
+ }
}
@Override
@@ -535,6 +652,11 @@ import org.slf4j.LoggerFactory;
struct.setColStatsIsSet(true);
struct.partsFound = iprot.readI64();
struct.setPartsFoundIsSet(true);
+ BitSet incoming = iprot.readBitSet(1);
+ if (incoming.get(0)) {
+ struct.isStatsCompliant = org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance.findByValue(iprot.readI32());
+ struct.setIsStatsCompliantIsSet(true);
+ }
}
}
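
AggrStats follows the same pattern: the enum stays unset unless the server fills it in, and the copy constructor, equals(), and hashCode() changes above all consult the new isset flag. A minimal sketch, assuming the standard generated setters for the required colStats and partsFound fields (not shown in this hunk):

import java.util.ArrayList;

import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

public class AggrStatsCopyExample {
  public static void main(String[] args) {
    AggrStats stats = new AggrStats();
    stats.setColStats(new ArrayList<ColumnStatisticsObj>()); // per-column stats elided
    stats.setPartsFound(3L);

    // isStatsCompliant stays unset unless the metastore fills it in; the copy
    // constructor only carries it over when isSetIsStatsCompliant() is true.
    AggrStats copy = stats.deepCopy();
    System.out.println("original set: " + stats.isSetIsStatsCompliant());
    System.out.println("copy set:     " + copy.isSetIsStatsCompliant());
    System.out.println("equal after copy: " + stats.equals(copy));
  }
}
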
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_like_2.q.out b/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
index 8e132a7..f3ec37a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_like_2.q.out
@@ -74,8 +74,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out b/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
index a35b816..abddf5a 100644
--- a/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
@@ -179,8 +179,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -259,8 +258,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_map_order.q.out b/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
index 02fc5a0..238555c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_map_order.q.out
@@ -75,8 +75,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
index 09a53d0..e0c7dfa 100644
--- a/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_mapjoin_reduce.q.out
@@ -67,8 +67,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -111,8 +110,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -166,8 +164,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -339,8 +336,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -384,8 +380,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -440,8 +435,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
index 9b01d79..8d2ce82 100644
--- a/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_mr_diff_schema_alias.q.out
@@ -300,8 +300,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -329,8 +329,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out b/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
index 4bf6a03..392c8f5 100644
--- a/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_multi_insert.q.out
@@ -149,8 +149,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_null_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_null_map.q.out b/ql/src/test/results/clientpositive/llap/vector_null_map.q.out
index 666f7fd..5394cc6 100644
--- a/ql/src/test/results/clientpositive/llap/vector_null_map.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_null_map.q.out
@@ -76,8 +76,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -149,8 +148,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out b/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
index 27eb15e..7bb01a6 100644
--- a/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_null_projection.q.out
@@ -83,8 +83,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out b/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
index 9801470..8a05290 100644
--- a/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_nullsafe_join.q.out
@@ -94,8 +94,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -122,8 +122,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -231,8 +231,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -266,8 +266,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -301,8 +301,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -395,8 +395,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -423,8 +423,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -451,8 +451,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -579,8 +579,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -613,8 +613,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -647,8 +647,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -741,8 +741,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -768,8 +768,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -795,8 +795,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -987,8 +987,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1015,8 +1015,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1124,8 +1124,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1159,8 +1159,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1194,8 +1194,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1288,8 +1288,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1316,8 +1316,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1344,8 +1344,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1472,8 +1472,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1506,8 +1506,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1540,8 +1540,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1634,8 +1634,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1661,8 +1661,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1688,8 +1688,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
index 9a7bd94..fc9c453 100644
--- a/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_number_compare_projection.q.out
@@ -164,8 +164,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -290,8 +290,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_nvl.q.out b/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
index dbcb770..13ebb17 100644
--- a/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_nvl.q.out
@@ -65,8 +65,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -160,8 +160,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -253,8 +253,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -346,8 +346,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_orc_merge_incompat_schema.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_orc_merge_incompat_schema.q.out b/ql/src/test/results/clientpositive/llap/vector_orc_merge_incompat_schema.q.out
index f1a4ea3..90e2103 100644
--- a/ql/src/test/results/clientpositive/llap/vector_orc_merge_incompat_schema.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_orc_merge_incompat_schema.q.out
@@ -161,8 +161,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -245,8 +244,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_orc_nested_column_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_orc_nested_column_pruning.q.out b/ql/src/test/results/clientpositive/llap/vector_orc_nested_column_pruning.q.out
index 74af8f8..797d994 100644
--- a/ql/src/test/results/clientpositive/llap/vector_orc_nested_column_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_orc_nested_column_pruning.q.out
@@ -161,8 +161,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -234,8 +234,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -307,8 +307,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -380,8 +380,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -453,8 +453,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -526,8 +526,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -606,8 +606,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -686,8 +686,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -766,8 +766,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1000,8 +1000,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1090,8 +1090,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1415,8 +1415,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1459,8 +1459,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1577,8 +1577,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1718,8 +1718,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1774,8 +1774,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -1877,8 +1877,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1911,8 +1911,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2025,8 +2025,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2070,8 +2070,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2228,8 +2228,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2367,8 +2367,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2506,8 +2506,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2746,8 +2746,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_orc_null_check.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_orc_null_check.q.out b/ql/src/test/results/clientpositive/llap/vector_orc_null_check.q.out
index 79eaf98..989c88e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_orc_null_check.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_orc_null_check.q.out
@@ -88,8 +88,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_order_null.q.out b/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
index 08c57bd..cb4053e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_order_null.q.out
@@ -127,8 +127,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -250,8 +249,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -373,8 +371,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -496,8 +493,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -619,8 +615,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -742,8 +737,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -865,8 +859,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -988,8 +981,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1111,8 +1103,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1234,8 +1225,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1357,8 +1347,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
index d3e10b0..0de0c33 100644
--- a/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_orderby_5.q.out
@@ -168,8 +168,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
index 50e6a85..19e98f3 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join0.q.out
@@ -135,8 +135,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -180,8 +180,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -270,8 +270,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -333,8 +333,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
index 0bce01d..c74a588 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join1.q.out
@@ -295,8 +295,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -340,8 +340,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -469,8 +469,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -513,8 +513,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -765,8 +765,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -809,8 +809,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -853,8 +853,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
index c2dc2b3..2e90aae 100644
--- a/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_outer_join2.q.out
@@ -340,8 +340,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -384,8 +384,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -428,8 +428,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
[23/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
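This part also updates vector_decimal_aggregate.q.out, where the aggregators switch from the generic decimal classes to their Decimal64 counterparts (VectorUDAFMaxDecimal64, VectorUDAFMinDecimal64, VectorUDAFSumDecimal64ToDecimal). A rough sketch of the query shape behind those plans, inferred from the column names and aggregations visible in the diff; the table name is a placeholder, not the q-file's actual table:

EXPLAIN VECTORIZATION DETAIL
SELECT cint,
       count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1),
       count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2),
       count(*)
FROM decimal_test   -- placeholder; cdecimal1 decimal(11,5), cdecimal2 decimal(16,0), cint int
GROUP BY cint;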
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
index 6cd1e8d..30a6770 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_aggregate.q.out
@@ -111,8 +111,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -291,8 +291,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -465,7 +465,7 @@ STAGE PLANS:
Statistics: Num rows: 12289 Data size: 2662128 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(11,5), 2:cdecimal2:decimal(16,0), 3:cint:int, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(11,5)/DECIMAL_64, 2:cdecimal2:decimal(16,0)/DECIMAL_64, 3:cint:int, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: cdecimal1 (type: decimal(11,5)), cdecimal2 (type: decimal(16,0)), cint (type: int)
outputColumnNames: cdecimal1, cdecimal2, cint
@@ -477,7 +477,7 @@ STAGE PLANS:
Group By Operator
aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), count()
Group By Vectorization:
- aggregators: VectorUDAFCount(col 1:decimal(11,5)) -> bigint, VectorUDAFMaxDecimal(col 1:decimal(11,5)) -> decimal(11,5), VectorUDAFMinDecimal(col 1:decimal(11,5)) -> decimal(11,5), VectorUDAFSumDecimal(col 1:decimal(11,5)) -> decimal(21,5), VectorUDAFCount(col 2:decimal(16,0)) -> bigint, VectorUDAFMaxDecimal(col 2:decimal(16,0)) -> decimal(16,0), VectorUDAFMinDecimal(col 2:decimal(16,0)) -> decimal(16,0), VectorUDAFSumDecimal(col 2:decimal(16,0)) -> decimal(26,0), VectorUDAFCountStar(*) -> bigint
+ aggregators: VectorUDAFCount(col 1:decimal(11,5)/DECIMAL_64) -> bigint, VectorUDAFMaxDecimal64(col 1:decimal(11,5)/DECIMAL_64) -> decimal(11,5)/DECIMAL_64, VectorUDAFMinDecimal64(col 1:decimal(11,5)/DECIMAL_64) -> decimal(11,5)/DECIMAL_64, VectorUDAFSumDecimal64ToDecimal(col 1:decimal(11,5)/DECIMAL_64) -> decimal(21,5), VectorUDAFCount(col 2:decimal(16,0)/DECIMAL_64) -> bigint, VectorUDAFMaxDecimal64(col 2:decimal(16,0)/DECIMAL_64) -> decimal(16,0)/DECIMAL_64, VectorUDAFMinDecimal64(col 2:decimal(16,0)/DECIMAL_64) -> decimal(16,0)/DECIMAL_64, VectorUDAFSumDecimal64ToDecimal(col 2:decimal(16,0)/DECIMAL_64) -> decimal(26,0), VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 3:int
@@ -506,8 +506,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -515,7 +514,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 4
includeColumns: [1, 2, 3]
- dataColumns: cdouble:double, cdecimal1:decimal(11,5), cdecimal2:decimal(16,0), cint:int
+ dataColumns: cdouble:double, cdecimal1:decimal(11,5)/DECIMAL_64, cdecimal2:decimal(16,0)/DECIMAL_64, cint:int
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
@@ -664,20 +663,20 @@ STAGE PLANS:
Statistics: Num rows: 12289 Data size: 2662128 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(11,5), 2:cdecimal2:decimal(16,0), 3:cint:int, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(11,5)/DECIMAL_64, 2:cdecimal2:decimal(16,0)/DECIMAL_64, 3:cint:int, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: cint (type: int), cdecimal1 (type: decimal(11,5)), cdecimal2 (type: decimal(16,0)), UDFToDouble(cdecimal1) (type: double), (UDFToDouble(cdecimal1) * UDFToDouble(cdecimal1)) (type: double), UDFToDouble(cdecimal2) (type: double), (UDFToDouble(cdecimal2) * UDFToDouble(cdecimal2)) (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [3, 1, 2, 5, 8, 6, 10]
- selectExpressions: CastDecimalToDouble(col 1:decimal(11,5)) -> 5:double, DoubleColMultiplyDoubleColumn(col 6:double, col 7:double)(children: CastDecimalToDouble(col 1:decimal(11,5)) -> 6:double, CastDecimalToDouble(col 1:decimal(11,5)) -> 7:double) -> 8:double, CastDecimalToDouble(col 2:decimal(16,0)) -> 6:double, DoubleColMultiplyDoubleColumn(col 7:double, col 9:double)(children: CastDecimalToDouble(col 2:decimal(16,0)) -> 7:double, CastDecimalToDouble(col 2:decimal(16,0)) -> 9:double) -> 10:double
+ projectedOutputColumnNums: [3, 1, 2, 6, 9, 7, 12]
+ selectExpressions: CastDecimalToDouble(col 5:decimal(11,5))(children: ConvertDecimal64ToDecimal(col 1:decimal(11,5)/DECIMAL_64) -> 5:decimal(11,5)) -> 6:double, DoubleColMultiplyDoubleColumn(col 7:double, col 8:double)(children: CastDecimalToDouble(col 5:decimal(11,5))(children: ConvertDecimal64ToDecimal(col 1:decimal(11,5)/DECIMAL_64) -> 5:decimal(11,5)) -> 7:double, CastDecimalToDouble(col 5:decimal(11,5))(children: ConvertDecimal64ToDecimal(col 1:decimal(11,5)/DECIMAL_64) -> 5:decimal(11,5)) -> 8:double) -> 9:double, CastDecimalToDouble(col 10:decimal(16,0))(children: ConvertDecimal64ToDecimal(col 2:decimal(16,0)/DECIMAL_64) -> 10:decimal(16,0)) -> 7:double, DoubleColMultiplyDoubleColumn(col 8:double, col 11:double)(children: CastDecimalToDouble(col 10:decimal(16,0))(children: ConvertDecimal64ToDecimal(col 2:decimal(16,0)/DECIMAL_64) -> 10:decimal(16,0)) -> 8:double, CastDecimalToDouble(col 10:decimal(16,0))(children: ConvertDecimal64ToDecimal(col 2:decimal(16,0)/DECIMAL_64) -> 10:decimal(16,0)) -> 11:double) -> 12:double
Statistics: Num rows: 12289 Data size: 2662128 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(_col1), max(_col1), min(_col1), sum(_col1), sum(_col4), sum(_col3), count(_col2), max(_col2), min(_col2), sum(_col2), sum(_col6), sum(_col5), count()
Group By Vectorization:
- aggregators: VectorUDAFCount(col 1:decimal(11,5)) -> bigint, VectorUDAFMaxDecimal(col 1:decimal(11,5)) -> decimal(11,5), VectorUDAFMinDecimal(col 1:decimal(11,5)) -> decimal(11,5), VectorUDAFSumDecimal(col 1:decimal(11,5)) -> decimal(21,5), VectorUDAFSumDouble(col 8:double) -> double, VectorUDAFSumDouble(col 5:double) -> double, VectorUDAFCount(col 2:decimal(16,0)) -> bigint, VectorUDAFMaxDecimal(col 2:decimal(16,0)) -> decimal(16,0), VectorUDAFMinDecimal(col 2:decimal(16,0)) -> decimal(16,0), VectorUDAFSumDecimal(col 2:decimal(16,0)) -> decimal(26,0), VectorUDAFSumDouble(col 10:double) -> double, VectorUDAFSumDouble(col 6:double) -> double, VectorUDAFCountStar(*) -> bigint
+ aggregators: VectorUDAFCount(col 1:decimal(11,5)/DECIMAL_64) -> bigint, VectorUDAFMaxDecimal64(col 1:decimal(11,5)/DECIMAL_64) -> decimal(11,5)/DECIMAL_64, VectorUDAFMinDecimal64(col 1:decimal(11,5)/DECIMAL_64) -> decimal(11,5)/DECIMAL_64, VectorUDAFSumDecimal64ToDecimal(col 1:decimal(11,5)/DECIMAL_64) -> decimal(21,5), VectorUDAFSumDouble(col 9:double) -> double, VectorUDAFSumDouble(col 6:double) -> double, VectorUDAFCount(col 2:decimal(16,0)/DECIMAL_64) -> bigint, VectorUDAFMaxDecimal64(col 2:decimal(16,0)/DECIMAL_64) -> decimal(16,0)/DECIMAL_64, VectorUDAFMinDecimal64(col 2:decimal(16,0)/DECIMAL_64) -> decimal(16,0)/DECIMAL_64, VectorUDAFSumDecimal64ToDecimal(col 2:decimal(16,0)/DECIMAL_64) -> decimal(26,0), VectorUDAFSumDouble(col 12:double) -> double, VectorUDAFSumDouble(col 7:double) -> double, VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 3:int
@@ -706,8 +705,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -715,9 +713,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 4
includeColumns: [1, 2, 3]
- dataColumns: cdouble:double, cdecimal1:decimal(11,5), cdecimal2:decimal(16,0), cint:int
+ dataColumns: cdouble:double, cdecimal1:decimal(11,5)/DECIMAL_64, cdecimal2:decimal(16,0)/DECIMAL_64, cint:int
partitionColumnCount: 0
- scratchColumnTypeNames: [double, double, double, double, double, double]
+ scratchColumnTypeNames: [decimal(11,5), double, double, double, double, decimal(16,0), double, double]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
index 67630b4..2414907 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_cast.q.out
@@ -60,8 +60,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -184,8 +184,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
index c01637e..024ce07 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_expressions.q.out
@@ -94,8 +94,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -234,12 +234,12 @@ STAGE PLANS:
Statistics: Num rows: 12288 Data size: 2708600 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(10,3), 2:cdecimal2:decimal(7,2), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(10,3)/DECIMAL_64, 2:cdecimal2:decimal(7,2)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterExprAndExpr(children: FilterDecimalColGreaterDecimalScalar(col 1:decimal(10,3), val 0), FilterDecimalColLessDecimalScalar(col 1:decimal(10,3), val 12345.5678), FilterDecimalColNotEqualDecimalScalar(col 2:decimal(7,2), val 0), FilterDecimalColGreaterDecimalScalar(col 2:decimal(7,2), val 1000), SelectColumnIsNotNull(col 0:double))
+ predicateExpression: FilterExprAndExpr(children: FilterDecimal64ColGreaterDecimal64Scalar(col 1:decimal(10,3)/DECIMAL_64, val 0), FilterDecimalColLessDecimalScalar(col 4:decimal(10,3), val 12345.5678)(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)), FilterDecimal64ColNotEqualDecimal64Scalar(col 2:decimal(7,2)/DECIMAL_64, val 0), FilterDecimal64ColGreaterDecimal64Scalar(col 2:decimal(7,2)/DECIMAL_64, val 100000), SelectColumnIsNotNull(col 0:double))
predicate: ((cdecimal1 < 12345.5678) and (cdecimal1 > 0) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
Statistics: Num rows: 455 Data size: 100294 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -248,15 +248,15 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
- selectExpressions: DecimalColAddDecimalColumn(col 1:decimal(10,3), col 2:decimal(7,2)) -> 4:decimal(11,3), DecimalColSubtractDecimalColumn(col 1:decimal(10,3), col 5:decimal(9,2))(children: DecimalScalarMultiplyDecimalColumn(val 2, col 2:decimal(7,2)) -> 5:decimal(9,2)) -> 6:decimal(11,3), DecimalColDivideDecimalColumn(col 7:decimal(11,3), col 2:decimal(7,2))(children: DecimalColAddDecimalScalar(col 1:decimal(10,3), val 2.34) -> 7:decimal(11,3)) -> 8:decimal(21,11), DecimalColMultiplyDecimalColumn(col 1:decimal(10,3), col 9:decimal(12,6))(children: DecimalColDivideDecimalScalar(col 2:decimal(7,2), val 3.4) -> 9:decimal(12,6)) -> 10:decimal(23,9), DecimalColModuloDecimalScalar(col 1:decimal(10,3), val 10) -> 11:decimal(5,3), CastDecimalToLong(col 1:decimal(10,3)) -> 12:int, CastDecimalToLong(col 2:decimal(7,2)) -> 13:smallint, CastDecimalToLong(col 2:decimal(7,2)) -> 14:tinyint, CastDecimalToLong(col 1:decimal(10,3)) -> 15:bigint, CastDecimalToBoolean(col 1:decimal(10,3)) -> 16:boolean, CastDecimalToDouble(col 2:decimal(7,2)) -> 17:double, CastDecimalToFloat(col 1:decimal(10,3)) -> 18:float, CastDecimalToString(col 2:decimal(7,2)) -> 19:string, CastDecimalToTimestamp(col 1:decimal(10,3)) -> 20:timestamp
+ projectedOutputColumnNums: [6, 8, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
+ selectExpressions: DecimalColAddDecimalColumn(col 4:decimal(10,3), col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3), ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 6:decimal(11,3), DecimalColSubtractDecimalColumn(col 4:decimal(10,3), col 7:decimal(9,2))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3), DecimalScalarMultiplyDecimalColumn(val 2, col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 7:decimal(9,2)) -> 8:decimal(11,3), DecimalColDivideDecimalColumn(col 23:decimal(11,3), col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 9:decimal(11,3)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,3)/DECIMAL_64, decimal64Val 2340, decimalVal 2.34) -> 9:decimal(11,3)/DECIMAL_64) -> 23:decimal(11,3), ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 10:decimal(21,11), DecimalColMultiplyDecimalColumn(col 4:decimal(10,3), col 11:decimal(12,6))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3), DecimalColDivideDecimalScalar(col 5:decimal(7,2), val 3.4)(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 11:decimal(12,6)) -> 12:decimal(23,9), DecimalColModuloDecimalScalar(col 4:decimal(10,3), val 10)(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 13:decimal(5,3), CastDecimalToLong(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 14:int, CastDecimalToLong(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 15:smallint, CastDecimalToLong(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 16:tinyint, CastDecimalToLong(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 17:bigint, CastDecimalToBoolean(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 18:boolean, CastDecimalToDouble(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 19:double, CastDecimalToFloat(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 20:float, CastDecimalToString(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 21:string, CastDecimalToTimestamp(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 22:timestamp
Statistics: Num rows: 455 Data size: 100294 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: decimal(11,3)), _col1 (type: decimal(11,3)), _col2 (type: decimal(21,11)), _col3 (type: decimal(23,9)), _col4 (type: decimal(5,3)), _col5 (type: int), _col6 (type: smallint), _col7 (type: tinyint), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: double), _col11 (type: float), _col12 (type: string), _col13 (type: timestamp)
sort order: ++++++++++++++
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
+ keyColumnNums: [6, 8, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: []
@@ -267,8 +267,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -276,9 +276,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0, 1, 2]
- dataColumns: cdouble:double, cdecimal1:decimal(10,3), cdecimal2:decimal(7,2)
+ dataColumns: cdouble:double, cdecimal1:decimal(10,3)/DECIMAL_64, cdecimal2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,3), decimal(9,2), decimal(11,3), decimal(11,3), decimal(21,11), decimal(12,6), decimal(23,9), decimal(5,3), bigint, bigint, bigint, bigint, bigint, double, double, string, timestamp]
+ scratchColumnTypeNames: [decimal(10,3), decimal(7,2), decimal(11,3), decimal(9,2), decimal(11,3), decimal(11,3)/DECIMAL_64, decimal(21,11), decimal(12,6), decimal(23,9), decimal(5,3), bigint, bigint, bigint, bigint, bigint, double, double, string, timestamp, decimal(11,3)]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -360,4 +360,4 @@ ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q
POSTHOOK: type: QUERY
POSTHOOK: Input: default@decimal_test_small_n0
#### A masked pattern was here ####
-774841630076
+1273824888155
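The select expressions in this plan show why DECIMAL_64 arithmetic is cheap: against a decimal(10,3)/DECIMAL_64 column the literal 2.34 is pre-scaled once to the column scale, which is where the "decimal64Val 2340, decimalVal 2.34" pair above comes from. Below is a hedged Java sketch of that scaling idea only, with a hypothetical row value and class name; it is not Hive's Decimal64ColAddDecimal64Scalar implementation.

import java.math.BigDecimal;

// Hypothetical sketch: rescale the scalar once, then each row needs only a long add.
public class Decimal64ScalarSketch {
    public static void main(String[] args) {
        final int columnScale = 3;                          // decimal(10,3)
        BigDecimal literal = new BigDecimal("2.34");

        // 2.34 rescaled to 3 decimal places -> unscaled long 2340
        long decimal64Val = literal.setScale(columnScale).unscaledValue().longValueExact();
        System.out.println("decimal64Val = " + decimal64Val);              // prints 2340

        long rowValue = 12345678L;                          // hypothetical row: 12345.678 as a scaled long
        long resultScaled = rowValue + decimal64Val;        // the vectorized inner-loop work
        System.out.println(BigDecimal.valueOf(resultScaled, columnScale)); // prints 12348.018
    }
}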
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
index 7b56d27..61f6609 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_mapjoin.q.out
@@ -152,8 +152,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -203,8 +203,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -338,6 +338,17 @@ POSTHOOK: Input: default@t2_n29
9.00 9
9.00 9
9.00 9
+PREHOOK: query: select count(*) from (select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n48
+PREHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+106
PREHOOK: query: explain vectorization detail
select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)
PREHOOK: type: QUERY
@@ -416,8 +427,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -468,8 +479,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -603,6 +614,17 @@ POSTHOOK: Input: default@t2_n29
9.00 48.96 9 5
9.00 48.96 9 7
9.00 48.96 9 7
+PREHOOK: query: select count(*) from (select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n48
+PREHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+106
PREHOOK: query: CREATE TABLE over1k_small(t tinyint,
si smallint,
i int,
@@ -708,12 +730,12 @@ STAGE PLANS:
Statistics: Num rows: 1049 Data size: 111776 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(14,2), 1:value_dec:decimal(14,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(14,2)/DECIMAL_64, 1:value_dec:decimal(14,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(14,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(14,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(14,2)/DECIMAL_64) -> 3:decimal(14,2))
predicate: dec is not null (type: boolean)
Statistics: Num rows: 997 Data size: 106235 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -731,8 +753,8 @@ STAGE PLANS:
0 _col0 (type: decimal(16,2))
1 _col0 (type: decimal(16,2))
Map Join Vectorization:
- bigTableKeyExpressions: col 0:decimal(16,2)
- bigTableValueExpressions: col 0:decimal(16,2)
+ bigTableKeyExpressions: ConvertDecimal64ToDecimal(col 0:decimal(16,2)/DECIMAL_64) -> 4:decimal(16,2)
+ bigTableValueExpressions: ConvertDecimal64ToDecimal(col 0:decimal(16,2)/DECIMAL_64) -> 5:decimal(16,2)
className: VectorMapJoinOperator
native: false
nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true
@@ -758,8 +780,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -767,9 +788,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: dec:decimal(14,2), value_dec:decimal(14,2)
+ dataColumns: dec:decimal(14,2)/DECIMAL_64, value_dec:decimal(14,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(14,2), decimal(16,2), decimal(16,2)]
Map 2
Map Operator Tree:
TableScan
@@ -777,12 +798,12 @@ STAGE PLANS:
Statistics: Num rows: 1049 Data size: 111776 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(14,0), 1:value_dec:decimal(14,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(14,0)/DECIMAL_64, 1:value_dec:decimal(14,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(14,0))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(14,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(14,0)/DECIMAL_64) -> 3:decimal(14,0))
predicate: dec is not null (type: boolean)
Statistics: Num rows: 997 Data size: 106235 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -810,8 +831,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -819,9 +839,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0]
- dataColumns: dec:decimal(14,0), value_dec:decimal(14,0)
+ dataColumns: dec:decimal(14,0)/DECIMAL_64, value_dec:decimal(14,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(14,0)]
Stage: Stage-0
Fetch Operator
@@ -839,112 +859,18 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-45.00 45
-45.00 45
-45.00 45
-45.00 45
-45.00 45
-6.00 6
-6.00 6
-6.00 6
-6.00 6
-6.00 6
-6.00 6
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-79.00 79
-79.00 79
-79.00 79
-79.00 79
-79.00 79
-79.00 79
-89.00 89
89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -974,12 +900,12 @@ STAGE PLANS:
Statistics: Num rows: 1049 Data size: 223552 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(14,2), 1:value_dec:decimal(14,2), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(14,2)/DECIMAL_64, 1:value_dec:decimal(14,2)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(14,2))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(14,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(14,2)/DECIMAL_64) -> 3:decimal(14,2))
predicate: dec is not null (type: boolean)
Statistics: Num rows: 997 Data size: 212470 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -997,8 +923,8 @@ STAGE PLANS:
0 _col0 (type: decimal(16,2))
1 _col0 (type: decimal(16,2))
Map Join Vectorization:
- bigTableKeyExpressions: col 0:decimal(16,2)
- bigTableValueExpressions: col 0:decimal(16,2), col 1:decimal(14,2)
+ bigTableKeyExpressions: ConvertDecimal64ToDecimal(col 0:decimal(16,2)/DECIMAL_64) -> 4:decimal(16,2)
+ bigTableValueExpressions: ConvertDecimal64ToDecimal(col 0:decimal(16,2)/DECIMAL_64) -> 5:decimal(16,2), ConvertDecimal64ToDecimal(col 1:decimal(14,2)/DECIMAL_64) -> 3:decimal(14,2)
className: VectorMapJoinOperator
native: false
nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true
@@ -1024,8 +950,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1033,9 +958,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: dec:decimal(14,2), value_dec:decimal(14,2)
+ dataColumns: dec:decimal(14,2)/DECIMAL_64, value_dec:decimal(14,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(14,0)]
+ scratchColumnTypeNames: [decimal(14,2), decimal(16,2), decimal(16,2), decimal(14,0)]
Map 2
Map Operator Tree:
TableScan
@@ -1043,12 +968,12 @@ STAGE PLANS:
Statistics: Num rows: 1049 Data size: 223552 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(14,0), 1:value_dec:decimal(14,0), 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(14,0)/DECIMAL_64, 1:value_dec:decimal(14,0)/DECIMAL_64, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: SelectColumnIsNotNull(col 0:decimal(14,0))
+ predicateExpression: SelectColumnIsNotNull(col 3:decimal(14,0))(children: ConvertDecimal64ToDecimal(col 0:decimal(14,0)/DECIMAL_64) -> 3:decimal(14,0))
predicate: dec is not null (type: boolean)
Statistics: Num rows: 997 Data size: 212470 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -1077,8 +1002,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1086,9 +1010,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: dec:decimal(14,0), value_dec:decimal(14,0)
+ dataColumns: dec:decimal(14,0)/DECIMAL_64, value_dec:decimal(14,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(14,0)]
Stage: Stage-0
Fetch Operator
@@ -1106,112 +1030,18 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
-14.00 33.66 14 10
-14.00 33.66 14 22
-14.00 33.66 14 34
-14.00 33.66 14 39
-14.00 33.66 14 42
-14.00 33.66 14 45
-14.00 33.66 14 46
-14.00 33.66 14 49
-14.00 33.66 14 5
-17.00 14.26 17 1
-17.00 14.26 17 14
-17.00 14.26 17 16
-17.00 14.26 17 19
-17.00 14.26 17 2
-17.00 14.26 17 22
-17.00 14.26 17 29
-17.00 14.26 17 3
-17.00 14.26 17 4
-17.00 14.26 17 44
-45.00 23.55 45 1
-45.00 23.55 45 2
-45.00 23.55 45 22
-45.00 23.55 45 24
-45.00 23.55 45 42
-6.00 29.78 6 16
-6.00 29.78 6 28
-6.00 29.78 6 30
-6.00 29.78 6 34
-6.00 29.78 6 36
-6.00 29.78 6 44
-62.00 21.02 62 15
-62.00 21.02 62 15
-62.00 21.02 62 21
-62.00 21.02 62 21
-62.00 21.02 62 22
-62.00 21.02 62 25
-62.00 21.02 62 29
-62.00 21.02 62 3
-62.00 21.02 62 34
-62.00 21.02 62 47
-62.00 21.02 62 47
-62.00 21.02 62 49
-64.00 37.76 64 0
-64.00 37.76 64 10
-64.00 37.76 64 10
-64.00 37.76 64 13
-64.00 37.76 64 23
-64.00 37.76 64 25
-64.00 37.76 64 26
-64.00 37.76 64 27
-64.00 37.76 64 27
-64.00 37.76 64 30
-64.00 37.76 64 32
-64.00 37.76 64 34
-64.00 37.76 64 35
-64.00 37.76 64 38
-64.00 37.76 64 40
-64.00 37.76 64 43
-64.00 37.76 64 5
-64.00 37.76 64 50
-70.00 24.59 70 2
-70.00 24.59 70 25
-70.00 24.59 70 27
-70.00 24.59 70 28
-70.00 24.59 70 3
-70.00 24.59 70 32
-70.00 24.59 70 44
-79.00 15.12 79 1
-79.00 15.12 79 15
-79.00 15.12 79 25
-79.00 15.12 79 30
-79.00 15.12 79 35
-79.00 15.12 79 35
-89.00 15.09 89 1
89.00 15.09 89 15
-89.00 15.09 89 23
-89.00 15.09 89 27
-89.00 15.09 89 28
-89.00 15.09 89 29
-89.00 15.09 89 30
-89.00 15.09 89 32
-89.00 15.09 89 39
-89.00 15.09 89 40
-89.00 15.09 89 45
-89.00 15.09 89 7
-9.00 48.96 9 12
-9.00 48.96 9 15
-9.00 48.96 9 2
-9.00 48.96 9 2
-9.00 48.96 9 2
-9.00 48.96 9 20
-9.00 48.96 9 20
-9.00 48.96 9 21
-9.00 48.96 9 21
-9.00 48.96 9 26
-9.00 48.96 9 27
-9.00 48.96 9 34
-9.00 48.96 9 38
-9.00 48.96 9 41
-9.00 48.96 9 42
-9.00 48.96 9 45
-9.00 48.96 9 48
-9.00 48.96 9 49
-9.00 48.96 9 5
-9.00 48.96 9 7
-9.00 48.96 9 7
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -1372,112 +1202,18 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-14.00 14
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-17.00 17
-45.00 45
-45.00 45
-45.00 45
-45.00 45
-45.00 45
-6.00 6
-6.00 6
-6.00 6
-6.00 6
-6.00 6
-6.00 6
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-62.00 62
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-64.00 64
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-70.00 70
-79.00 79
-79.00 79
-79.00 79
-79.00 79
-79.00 79
-79.00 79
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-89.00 89
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
-9.00 9
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -1639,109 +1375,15 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
-14.00 33.66 14 10
-14.00 33.66 14 22
-14.00 33.66 14 34
-14.00 33.66 14 39
-14.00 33.66 14 42
-14.00 33.66 14 45
-14.00 33.66 14 46
-14.00 33.66 14 49
-14.00 33.66 14 5
-17.00 14.26 17 1
-17.00 14.26 17 14
-17.00 14.26 17 16
-17.00 14.26 17 19
-17.00 14.26 17 2
-17.00 14.26 17 22
-17.00 14.26 17 29
-17.00 14.26 17 3
-17.00 14.26 17 4
-17.00 14.26 17 44
-45.00 23.55 45 1
-45.00 23.55 45 2
-45.00 23.55 45 22
-45.00 23.55 45 24
-45.00 23.55 45 42
-6.00 29.78 6 16
-6.00 29.78 6 28
-6.00 29.78 6 30
-6.00 29.78 6 34
-6.00 29.78 6 36
-6.00 29.78 6 44
-62.00 21.02 62 15
-62.00 21.02 62 15
-62.00 21.02 62 21
-62.00 21.02 62 21
-62.00 21.02 62 22
-62.00 21.02 62 25
-62.00 21.02 62 29
-62.00 21.02 62 3
-62.00 21.02 62 34
-62.00 21.02 62 47
-62.00 21.02 62 47
-62.00 21.02 62 49
-64.00 37.76 64 0
-64.00 37.76 64 10
-64.00 37.76 64 10
-64.00 37.76 64 13
-64.00 37.76 64 23
-64.00 37.76 64 25
-64.00 37.76 64 26
-64.00 37.76 64 27
-64.00 37.76 64 27
-64.00 37.76 64 30
-64.00 37.76 64 32
-64.00 37.76 64 34
-64.00 37.76 64 35
-64.00 37.76 64 38
-64.00 37.76 64 40
-64.00 37.76 64 43
-64.00 37.76 64 5
-64.00 37.76 64 50
-70.00 24.59 70 2
-70.00 24.59 70 25
-70.00 24.59 70 27
-70.00 24.59 70 28
-70.00 24.59 70 3
-70.00 24.59 70 32
-70.00 24.59 70 44
-79.00 15.12 79 1
-79.00 15.12 79 15
-79.00 15.12 79 25
-79.00 15.12 79 30
-79.00 15.12 79 35
-79.00 15.12 79 35
-89.00 15.09 89 1
89.00 15.09 89 15
-89.00 15.09 89 23
-89.00 15.09 89 27
-89.00 15.09 89 28
-89.00 15.09 89 29
-89.00 15.09 89 30
-89.00 15.09 89 32
-89.00 15.09 89 39
-89.00 15.09 89 40
-89.00 15.09 89 45
-89.00 15.09 89 7
-9.00 48.96 9 12
-9.00 48.96 9 15
-9.00 48.96 9 2
-9.00 48.96 9 2
-9.00 48.96 9 2
-9.00 48.96 9 20
-9.00 48.96 9 20
-9.00 48.96 9 21
-9.00 48.96 9 21
-9.00 48.96 9 26
-9.00 48.96 9 27
-9.00 48.96 9 34
-9.00 48.96 9 38
-9.00 48.96 9 41
-9.00 48.96 9 42
-9.00 48.96 9 45
-9.00 48.96 9 48
-9.00 48.96 9 49
-9.00 48.96 9 5
-9.00 48.96 9 7
-9.00 48.96 9 7
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
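In the map-join plans above, the "bigTableKeyExpressions: ConvertDecimal64ToDecimal(...)" lines mean the non-native VectorMapJoinOperator still probes with fully materialized decimals, so the scaled-long (DECIMAL_64) key column is widened back before the lookup. A hedged Java sketch of that step follows, using toy data and a plain HashMap; it is not Hive's operator code.

import java.math.BigDecimal;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: widen DECIMAL_64 join keys back to decimals before probing.
public class ConvertDecimal64JoinKeySketch {
    public static void main(String[] args) {
        final int scale = 2;                                // dec:decimal(14,2)
        long[] bigTableKeys = {8900L, 1400L, 6200L};        // 89.00, 14.00, 62.00 as scaled longs

        Map<BigDecimal, Integer> smallTable = new HashMap<>();
        smallTable.put(new BigDecimal("89.00"), 15);        // small-table side, already a decimal

        for (long scaled : bigTableKeys) {
            BigDecimal key = BigDecimal.valueOf(scaled, scale);  // the ConvertDecimal64ToDecimal step
            Integer value = smallTable.get(key);
            if (value != null) {
                System.out.println(key + "\t" + value);     // prints 89.00 followed by 15
            }
        }
    }
}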
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
index ec43f62..e3d5044 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_math_funcs.q.out
@@ -150,8 +150,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -364,12 +364,12 @@ STAGE PLANS:
Statistics: Num rows: 12288 Data size: 1401000 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:cbigint:bigint, 1:cdouble:double, 2:cdecimal1:decimal(12,4), 3:cdecimal2:decimal(14,8), 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:cbigint:bigint, 1:cdouble:double, 2:cdecimal1:decimal(12,4)/DECIMAL_64, 3:cdecimal2:decimal(14,8)/DECIMAL_64, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 7:double, val -1.0)(children: FuncSinDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 6:double) -> 7:double))
+ predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 8:double, val -1.0)(children: FuncSinDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 7:double) -> 8:double))
predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0D)) (type: boolean)
Statistics: Num rows: 2048 Data size: 233500 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -378,8 +378,8 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2, 8, 9, 10, 11, 6, 12, 13, 14, 16, 17, 7, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 2, 29, 5, 30]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(12,4), decimalPlaces 2) -> 8:decimal(11,2), FuncRoundDecimalToDecimal(col 2:decimal(12,4)) -> 9:decimal(9,0), FuncFloorDecimalToDecimal(col 2:decimal(12,4)) -> 10:decimal(9,0), FuncCeilDecimalToDecimal(col 2:decimal(12,4)) -> 11:decimal(9,0), RoundWithNumDigitsDoubleToDouble(col 7, decimalPlaces 58)(children: FuncExpDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 6:double) -> 7:double) -> 6:double, FuncLnDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 12:double, FuncLog10DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 13:double, FuncLog2DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 14:double, FuncLog2DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 15:decimal(13,4))(children: DecimalColSubtractD
ecimalScalar(col 2:decimal(12,4), val 15601) -> 15:decimal(13,4)) -> 7:double) -> 16:double, FuncLogWithBaseDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 17:double, FuncPowerDoubleToDouble(col 18:double)(children: FuncLog2DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 18:double) -> 7:double, FuncPowerDoubleToDouble(col 19:double)(children: FuncLog2DoubleToDouble(col 18:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 18:double) -> 19:double) -> 18:double, FuncSqrtDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 20:double, FuncAbsDecimalToDecimal(col 2:decimal(12,4)) -> 21:decimal(12,4), FuncSinDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 22:double, FuncASinDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 23:double, FuncCosDo
ubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 24:double, FuncACosDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 25:double, FuncATanDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 26:double, FuncDegreesDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 27:double, FuncRadiansDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 28:double, FuncNegateDecimalToDecimal(col 2:decimal(12,4)) -> 29:decimal(12,4), FuncSignDecimalToLong(col 2:decimal(12,4)) -> 5:int, FuncCosDoubleToDouble(col 19:double)(children: DoubleColAddDoubleScalar(col 30:double, val 3.14159)(children: DoubleColUnaryMinus(col 19:double)(children: FuncSinDoubleToDouble(col 30:double)(children: FuncLnDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19
:double) -> 30:double) -> 19:double) -> 30:double) -> 19:double) -> 30:double
+ projectedOutputColumnNums: [2, 9, 10, 11, 12, 7, 13, 14, 15, 17, 18, 8, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 2, 30, 5, 31]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 6:decimal(12,4), decimalPlaces 2)(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 9:decimal(11,2), FuncRoundDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 10:decimal(9,0), FuncFloorDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 11:decimal(9,0), FuncCeilDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 12:decimal(9,0), RoundWithNumDigitsDoubleToDouble(col 8, decimalPlaces 58)(children: FuncExpDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 7:double) -> 8:double) -> 7:double, FuncLnDoubleToDouble(
col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 13:double, FuncLog10DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 14:double, FuncLog2DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 15:double, FuncLog2DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 32:decimal(13,4))(children: ConvertDecimal64ToDecimal(col 16:decimal(13,4)/DECIMAL_64)(children: Decimal64ColSubtractDecimal64Scalar(col 2:decimal(12,4)/DECIMAL_64, decimal64Val 156010000, decimalVal 15601) -> 16:decimal(13,4)/DECIMAL_64) -> 32:decimal(13,4)) -> 8:double) -> 17:double, FuncLogWithBaseDoubleToDouble(col 8:double)(children
: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 18:double, FuncPowerDoubleToDouble(col 19:double)(children: FuncLog2DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 19:double) -> 8:double, FuncPowerDoubleToDouble(col 20:double)(children: FuncLog2DoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 19:double) -> 20:double) -> 19:double, FuncSqrtDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 21:double, FuncAbsDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/
DECIMAL_64) -> 6:decimal(12,4)) -> 22:decimal(12,4), FuncSinDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 23:double, FuncASinDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 24:double, FuncCosDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 25:double, FuncACosDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 26:double, FuncATanDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(c
ol 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 27:double, FuncDegreesDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 28:double, FuncRadiansDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 29:double, FuncNegateDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 30:decimal(12,4), FuncSignDecimalToLong(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 5:int, FuncCosDoubleToDouble(col 20:double)(children: DoubleColAddDoubleScalar(col 31:double, val 3.14159)(children: DoubleColUnaryMinus(col 20:double)(children: FuncSinDoubleToDouble(col 31:double)(children: Fu
ncLnDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 31:double) -> 20:double) -> 31:double) -> 20:double) -> 31:double
Statistics: Num rows: 2048 Data size: 233500 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -396,8 +396,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -405,9 +405,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 4
includeColumns: [0, 2]
- dataColumns: cbigint:bigint, cdouble:double, cdecimal1:decimal(12,4), cdecimal2:decimal(14,8)
+ dataColumns: cbigint:bigint, cdouble:double, cdecimal1:decimal(12,4)/DECIMAL_64, cdecimal2:decimal(14,8)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, double, double, decimal(11,2), decimal(9,0), decimal(9,0), decimal(9,0), double, double, double, decimal(13,4), double, double, double, double, double, decimal(12,4), double, double, double, double, double, double, double, decimal(12,4), double]
+ scratchColumnTypeNames: [bigint, decimal(12,4), double, double, decimal(11,2), decimal(9,0), decimal(9,0), decimal(9,0), double, double, double, decimal(13,4)/DECIMAL_64, double, double, double, double, double, decimal(12,4), double, double, double, double, double, double, double, decimal(12,4), double, decimal(13,4)]
Stage: Stage-0
Fetch Operator
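The math-function plan above wraps every CastDecimalToDouble in a ConvertDecimal64ToDecimal: UDFs such as sin() operate on doubles, so a scaled-long column value has to be turned back into its real value first. The short Java sketch below illustrates that two-step cast under the assumption of a decimal(12,4) column; the constant mirrors the "decimal64Val 156010000, decimalVal 15601" pair shown in the plan, and the class is hypothetical rather than Hive's generated expression tree.

// Hypothetical sketch: scaled long -> real value -> double math UDF.
public class Decimal64ToDoubleSketch {
    public static void main(String[] args) {
        final int scale = 4;                      // cdecimal1:decimal(12,4)
        long scaledValue = 156010000L;            // 15601.0000 as a scaled long

        double asDouble = scaledValue / Math.pow(10, scale);   // back to 15601.0
        double sin = Math.sin(asDouble);

        // Mirrors the filter predicate "sin(cdecimal1) >= -1.0D" above.
        System.out.println(asDouble + " -> sin=" + sin + ", >= -1.0: " + (sin >= -1.0d));
    }
}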
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
index 50e4305..5e7e8ca 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_precision.q.out
@@ -612,8 +612,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1232,8 +1232,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
index 6737052..eb4a588 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_round.q.out
@@ -59,15 +59,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
outputColumnNames: _col0, _col1
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [0, 2]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) -> 2:decimal(11,0)
+ projectedOutputColumnNums: [0, 3]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces -1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 2:decimal(10,0)) -> 3:decimal(11,0)
Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: decimal(10,0))
@@ -77,7 +77,7 @@ STAGE PLANS:
keyColumnNums: [0]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
- valueColumnNums: [2]
+ valueColumnNums: [3]
Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: decimal(11,0))
Execution mode: vectorized, llap
@@ -86,8 +86,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -95,9 +94,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,0)]
+ scratchColumnTypeNames: [decimal(10,0), decimal(11,0)]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -177,22 +176,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
outputColumnNames: _col0, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [0, 2]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) -> 2:decimal(11,0)
+ projectedOutputColumnNums: [0, 3]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces -1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 2:decimal(10,0)) -> 3:decimal(11,0)
Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col2 (type: decimal(11,0))
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: [0]
@@ -204,8 +203,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -213,9 +211,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,0)]
+ scratchColumnTypeNames: [decimal(10,0), decimal(11,0)]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -585,15 +583,15 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
outputColumnNames: _col0, _col1
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [0, 2]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) -> 2:decimal(11,0)
+ projectedOutputColumnNums: [0, 3]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces -1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 2:decimal(10,0)) -> 3:decimal(11,0)
Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: decimal(10,0))
@@ -603,7 +601,7 @@ STAGE PLANS:
keyColumnNums: [0]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
- valueColumnNums: [2]
+ valueColumnNums: [3]
Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: decimal(11,0))
Execution mode: vectorized, llap
@@ -611,8 +609,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -620,9 +618,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,0)]
+ scratchColumnTypeNames: [decimal(10,0), decimal(11,0)]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
@@ -702,22 +700,22 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
outputColumnNames: _col0, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [0, 2]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 0:decimal(10,0), decimalPlaces -1) -> 2:decimal(11,0)
+ projectedOutputColumnNums: [0, 3]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(10,0), decimalPlaces -1)(children: ConvertDecimal64ToDecimal(col 0:decimal(10,0)/DECIMAL_64) -> 2:decimal(10,0)) -> 3:decimal(11,0)
Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col2 (type: decimal(11,0))
sort order: +
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
- keyColumnNums: [2]
+ keyColumnNums: [3]
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumnNums: [0]
@@ -728,8 +726,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -737,9 +735,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,0)]
+ scratchColumnTypeNames: [decimal(10,0), decimal(11,0)]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
index e3d4f40..bb0cbfc 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_round_2.q.out
@@ -89,8 +89,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -257,8 +257,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -452,8 +452,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -636,8 +636,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_trailing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_trailing.q.out
index b33f090..a35f6fe 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_trailing.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_trailing.q.out
@@ -94,7 +94,7 @@ STAGE PLANS:
Statistics: Num rows: 30 Data size: 4936 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:id:int, 1:a:decimal(10,4), 2:b:decimal(15,8), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:id:int, 1:a:decimal(10,4)/DECIMAL_64, 2:b:decimal(15,8)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: id (type: int), a (type: decimal(10,4)), b (type: decimal(15,8))
outputColumnNames: _col0, _col1, _col2
@@ -119,8 +119,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -128,7 +128,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0, 1, 2]
- dataColumns: id:int, a:decimal(10,4), b:decimal(15,8)
+ dataColumns: id:int, a:decimal(10,4)/DECIMAL_64, b:decimal(15,8)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reducer 2
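The plan changes above all follow one pattern introduced by HIVE-19629: the upgraded ORC reader lets OrcInputFormat advertise the DECIMAL_64 feature, so the Map Vectorization block now reports inputFormatFeatureSupport and featureSupportInUse as [DECIMAL_64], decimal columns with precision up to 18 carry a /DECIMAL_64 annotation in the vectorization schema, and expressions that still need a HiveDecimal input (such as round with digits) gain a ConvertDecimal64ToDecimal child plus an extra scratch decimal column. A minimal sketch for reproducing this kind of EXPLAIN output follows; the table name and data are illustrative and not part of the patch:

  CREATE TABLE dec64_demo (dec DECIMAL(10,0)) STORED AS ORC;
  INSERT INTO dec64_demo VALUES (1234567890);
  SET hive.vectorized.execution.enabled=true;
  -- Prints the Map Vectorization block seen in these diffs, including
  -- inputFormatFeatureSupport, featureSupportInUse and scratchColumnTypeNames.
  EXPLAIN VECTORIZATION DETAIL
  SELECT dec, ROUND(dec, -1) FROM dec64_demo ORDER BY 2;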
[20/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
index dcedca8..815b2a3 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_1.q.out
@@ -112,8 +112,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -241,8 +241,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -378,8 +378,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -527,8 +527,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -687,8 +687,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -829,8 +829,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -953,8 +953,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1083,8 +1083,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
index 2ee7502..1cd498f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_2.q.out
@@ -152,8 +152,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -358,8 +358,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -564,8 +564,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -770,8 +770,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -967,8 +967,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1161,8 +1161,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1345,8 +1345,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1529,8 +1529,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1723,8 +1723,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1917,8 +1917,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
index 87993d2..af5815f 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_arithmetic.q.out
@@ -106,8 +106,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -288,8 +288,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -470,8 +470,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -654,8 +654,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -759,8 +759,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -943,8 +943,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1127,8 +1127,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1309,8 +1309,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
index cfe3d5f..2b9fc34 100644
--- a/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_interval_mapjoin.q.out
@@ -256,8 +256,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -299,8 +299,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_join30.q.out b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
index 9530804..5187574 100644
--- a/ql/src/test/results/clientpositive/llap/vector_join30.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_join30.q.out
@@ -79,8 +79,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -146,8 +146,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -296,8 +296,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -332,8 +332,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -457,8 +457,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -517,8 +517,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -655,8 +655,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -728,8 +728,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -770,8 +770,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -907,8 +907,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -943,8 +943,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -978,8 +978,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1137,8 +1137,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1173,8 +1173,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1208,8 +1208,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1367,8 +1367,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1403,8 +1403,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1438,8 +1438,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1597,8 +1597,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1633,8 +1633,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1668,8 +1668,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out b/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
index 6201a48..3d78cfe 100644
--- a/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_left_outer_join.q.out
@@ -75,8 +75,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -100,8 +100,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -125,8 +125,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
index 25a644d..55be910 100644
--- a/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_left_outer_join2.q.out
@@ -340,8 +340,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -376,8 +376,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -482,8 +482,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -518,8 +518,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -621,8 +621,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -657,8 +657,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -760,8 +760,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -796,8 +796,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
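In the interval and join plans above only the Map Vectorization summary changes: inputFormatFeatureSupport is what the input format (here OrcInputFormat) declares it can deliver, while featureSupportInUse is the subset the session actually enables, and both now show [DECIMAL_64]. Assuming the configuration name of this Hive line (an assumption; verify against HiveConf for the release in use), the session-side switch can be inspected or disabled roughly like this:

  -- Hypothetical check; the property name is an assumption, not taken from this commit.
  SET hive.vectorized.input.format.supports.enabled;        -- expected default: decimal_64
  SET hive.vectorized.input.format.supports.enabled=none;   -- featureSupportInUse should fall back to []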
[07/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 95ebf46..1827f67 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -283,8 +283,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -491,8 +491,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
@@ -683,8 +683,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
@@ -875,8 +875,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
@@ -1016,8 +1016,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1141,8 +1141,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1284,8 +1284,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
index 3c9cf03..d10faeb 100644
--- a/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
+++ b/ql/src/test/results/clientpositive/tez/acid_vectorization_original_tez.q.out
@@ -370,10 +370,10 @@ POSTHOOK: Lineage: over10k_orc_bucketed_n0.si SIMPLE [(over10k_n9)over10k_n9.Fie
POSTHOOK: Lineage: over10k_orc_bucketed_n0.t SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:t, type:tinyint, comment:null), ]
POSTHOOK: Lineage: over10k_orc_bucketed_n0.ts SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:ts, type:timestamp, comment:null), ]
Found 4 items
--rw-rw-rw- 3 ### USER ### ### GROUP ### 8997 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7773 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7358 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7261 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 8914 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7709 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7284 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7190 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: insert into over10k_orc_bucketed_n0 select * from over10k_n9
PREHOOK: type: QUERY
PREHOOK: Input: default@over10k_n9
@@ -394,14 +394,14 @@ POSTHOOK: Lineage: over10k_orc_bucketed_n0.si SIMPLE [(over10k_n9)over10k_n9.Fie
POSTHOOK: Lineage: over10k_orc_bucketed_n0.t SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:t, type:tinyint, comment:null), ]
POSTHOOK: Lineage: over10k_orc_bucketed_n0.ts SIMPLE [(over10k_n9)over10k_n9.FieldSchema(name:ts, type:timestamp, comment:null), ]
Found 8 items
--rw-rw-rw- 3 ### USER ### ### GROUP ### 8997 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 8997 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7773 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7773 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7358 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7358 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7261 ### HDFS DATE ### hdfs://### HDFS PATH ###
--rw-rw-rw- 3 ### USER ### ### GROUP ### 7261 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 8914 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 8914 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7709 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7709 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7284 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7284 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7190 ### HDFS DATE ### hdfs://### HDFS PATH ###
+-rw-rw-rw- 3 ### USER ### ### GROUP ### 7190 ### HDFS DATE ### hdfs://### HDFS PATH ###
PREHOOK: query: select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed_n0
PREHOOK: type: QUERY
PREHOOK: Input: default@over10k_orc_bucketed_n0
@@ -680,22 +680,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: over10k_orc_bucketed_n0
- Statistics: Num rows: 1247 Data size: 713720 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1237 Data size: 707880 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: ROW__ID
- Statistics: Num rows: 1247 Data size: 713720 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1237 Data size: 707880 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: hash
outputColumnNames: _col0, _col1
- Statistics: Num rows: 623 Data size: 52332 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
- Statistics: Num rows: 623 Data size: 52332 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Reducer 2
Reduce Operator Tree:
@@ -704,13 +704,13 @@ STAGE PLANS:
keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: mergepartial
outputColumnNames: _col0, _col1
- Statistics: Num rows: 623 Data size: 52332 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (_col1 > 1L) (type: boolean)
- Statistics: Num rows: 207 Data size: 17388 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 207 Data size: 17388 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
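The acid_vectorization_original_tez changes above are a side effect of the ORC upgrade itself rather than of the plan: the rewritten bucket files come out slightly smaller (the first listing shrinks from 8997 + 7773 + 7358 + 7261 = 31389 bytes to 8914 + 7709 + 7284 + 7190 = 31097 bytes, roughly 0.9%), and since the row-count estimate for these original files appears to be derived from on-disk size, it drops by about the same proportion, from 1247 to 1237 rows, which then propagates into the downstream Statistics lines.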
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
index a6eb4e5..dee97db 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
@@ -623,11 +623,11 @@ Stage-0
TableScan [TS_0] (rows=500/500 width=178)
default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
-PREHOOK: query: create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n1
-POSTHOOK: query: create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n1
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
index fa72556..130029c 100644
--- a/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out
@@ -83,8 +83,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -204,8 +204,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
index fe7fbfd..9487881 100644
--- a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
@@ -158,8 +158,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -264,8 +264,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -370,8 +370,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out b/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
index 4d2b0dc..19641ee 100644
--- a/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
+++ b/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
@@ -97,8 +97,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_between_columns.q.out b/ql/src/test/results/clientpositive/vector_between_columns.q.out
index 7967df1..f5e095b 100644
--- a/ql/src/test/results/clientpositive/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/vector_between_columns.q.out
@@ -173,8 +173,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -333,8 +333,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
index b66c0b0..ac9ef5c 100644
--- a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
@@ -170,7 +170,7 @@ STAGE PLANS:
1 _col10 (type: binary)
Map Join Vectorization:
bigTableKeyExpressions: col 10:binary
- bigTableValueExpressions: col 0:tinyint, col 1:smallint, col 2:int, col 3:bigint, col 4:float, col 5:double, col 6:boolean, col 7:string, col 8:timestamp, col 9:decimal(4,2), col 10:binary
+ bigTableValueExpressions: col 0:tinyint, col 1:smallint, col 2:int, col 3:bigint, col 4:float, col 5:double, col 6:boolean, col 7:string, col 8:timestamp, ConvertDecimal64ToDecimal(col 9:decimal(4,2)/DECIMAL_64) -> 12:decimal(4,2), col 10:binary
className: VectorMapJoinOperator
native: false
nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
@@ -211,8 +211,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -364,8 +364,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -582,8 +582,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_bround.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_bround.q.out b/ql/src/test/results/clientpositive/vector_bround.q.out
index 644902b..68086c2 100644
--- a/ql/src/test/results/clientpositive/vector_bround.q.out
+++ b/ql/src/test/results/clientpositive/vector_bround.q.out
@@ -91,8 +91,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
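The vector_binary_join_groupby hunk above also shows what happens when an operator needs a full HiveDecimal value: the decimal(4,2) map-join value column is wrapped as ConvertDecimal64ToDecimal(col 9:decimal(4,2)/DECIMAL_64) -> 12:decimal(4,2). The decimal64 representation only applies while the unscaled value fits in a signed 64-bit long, i.e. precision up to 18 digits, so wider decimals never receive the /DECIMAL_64 annotation. A hedged contrast query (table and column names are illustrative, not from the patch):

  CREATE TABLE dec_width_demo (d18 DECIMAL(18,2), d38 DECIMAL(38,10)) STORED AS ORC;
  SET hive.vectorized.execution.enabled=true;
  EXPLAIN VECTORIZATION DETAIL
  SELECT d18 + 1, d38 + 1 FROM dec_width_demo;
  -- Only d18 is expected to appear as decimal(18,2)/DECIMAL_64 in the
  -- vectorizationSchemaColumns; d38 stays a regular decimal column.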
[05/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_case_when_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_case_when_2.q.out b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
index b8a5214..76c7f3d 100644
--- a/ql/src/test/results/clientpositive/vector_case_when_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
@@ -392,8 +392,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -651,8 +651,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
index d8a534f..8c596a6 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
@@ -165,8 +165,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index 97038ee..dc2c1e4 100644
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -126,8 +126,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -314,8 +314,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_char_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_4.q.out b/ql/src/test/results/clientpositive/vector_char_4.q.out
index 5b9f272..8d27537 100644
--- a/ql/src/test/results/clientpositive/vector_char_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_4.q.out
@@ -174,8 +174,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out b/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
index c98bb44..25199b8 100644
--- a/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
@@ -213,8 +213,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -346,8 +346,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -481,8 +481,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_char_simple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_simple.q.out b/ql/src/test/results/clientpositive/vector_char_simple.q.out
index 43c3e48..a845c47 100644
--- a/ql/src/test/results/clientpositive/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_simple.q.out
@@ -70,8 +70,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -147,8 +147,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -236,8 +236,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce.q.out b/ql/src/test/results/clientpositive/vector_coalesce.q.out
index d1b12e6..0d20a0e 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce.q.out
@@ -42,8 +42,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -127,8 +127,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -213,8 +213,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -293,8 +293,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -379,8 +379,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -458,8 +458,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
index c42d295..6030750 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
@@ -93,8 +93,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -193,8 +193,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -288,8 +288,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -388,8 +388,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_coalesce_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_3.q.out b/ql/src/test/results/clientpositive/vector_coalesce_3.q.out
index 6b7e21b..884078d 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_3.q.out
@@ -145,8 +145,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_coalesce_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_4.q.out b/ql/src/test/results/clientpositive/vector_coalesce_4.q.out
index f359b22..c7c0da6 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce_4.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_4.q.out
@@ -73,8 +73,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_count.q.out b/ql/src/test/results/clientpositive/vector_count.q.out
index 85d5926..10a144a 100644
--- a/ql/src/test/results/clientpositive/vector_count.q.out
+++ b/ql/src/test/results/clientpositive/vector_count.q.out
@@ -242,8 +242,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -327,8 +327,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_data_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out
index b72340d..dad2abd 100644
--- a/ql/src/test/results/clientpositive/vector_data_types.q.out
+++ b/ql/src/test/results/clientpositive/vector_data_types.q.out
@@ -244,8 +244,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -363,8 +363,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_date_1.q.out b/ql/src/test/results/clientpositive/vector_date_1.q.out
index affd786..c2e9e5d 100644
--- a/ql/src/test/results/clientpositive/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_date_1.q.out
@@ -131,8 +131,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -270,8 +270,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -409,8 +409,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -548,8 +548,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -691,8 +691,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -836,8 +836,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -943,8 +943,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_1.q.out b/ql/src/test/results/clientpositive/vector_decimal_1.q.out
index 80def64..ee6895b 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_1.q.out
@@ -66,15 +66,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToBoolean(t) (type: boolean)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToBoolean(col 0:decimal(4,2)) -> 4:boolean
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToBoolean(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:boolean
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: boolean)
@@ -89,8 +89,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -98,9 +98,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -157,15 +157,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToByte(t) (type: tinyint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:tinyint
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:tinyint
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: tinyint)
@@ -180,8 +180,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -189,9 +189,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -248,15 +248,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToShort(t) (type: smallint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:smallint
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:smallint
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: smallint)
@@ -271,8 +271,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -280,9 +280,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -339,15 +339,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToInteger(t) (type: int)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:int
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:int
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: int)
@@ -362,8 +362,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -371,9 +371,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -430,15 +430,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToLong(t) (type: bigint)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToLong(col 0:decimal(4,2)) -> 4:bigint
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToLong(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:bigint
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: bigint)
@@ -453,8 +453,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -462,9 +462,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint]
+ scratchColumnTypeNames: [decimal(4,2), bigint]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -521,15 +521,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToFloat(t) (type: float)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToFloat(col 0:decimal(4,2)) -> 4:float
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToFloat(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:float
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: float)
@@ -544,8 +544,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -553,9 +553,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(4,2), double]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -612,15 +612,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToDouble(t) (type: double)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToDouble(col 0:decimal(4,2)) -> 4:double
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToDouble(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:double
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: double)
@@ -635,8 +635,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -644,9 +644,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [double]
+ scratchColumnTypeNames: [decimal(4,2), double]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -703,15 +703,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: UDFToString(t) (type: string)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToString(col 0:decimal(4,2)) -> 4:string
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToString(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:string
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
@@ -726,8 +726,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -735,9 +735,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [string]
+ scratchColumnTypeNames: [decimal(4,2), string]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -794,15 +794,15 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:t:decimal(4,2), 1:u:decimal(5,0), 2:v:decimal(10,0), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:t:decimal(4,2)/DECIMAL_64, 1:u:decimal(5,0)/DECIMAL_64, 2:v:decimal(10,0)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: CAST( t AS TIMESTAMP) (type: timestamp)
outputColumnNames: _col0
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4]
- selectExpressions: CastDecimalToTimestamp(col 0:decimal(4,2)) -> 4:timestamp
+ projectedOutputColumnNums: [5]
+ selectExpressions: CastDecimalToTimestamp(col 4:decimal(4,2))(children: ConvertDecimal64ToDecimal(col 0:decimal(4,2)/DECIMAL_64) -> 4:decimal(4,2)) -> 5:timestamp
Statistics: Num rows: 2 Data size: 336 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: timestamp)
@@ -817,8 +817,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -826,9 +826,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0]
- dataColumns: t:decimal(4,2), u:decimal(5,0), v:decimal(10,0)
+ dataColumns: t:decimal(4,2)/DECIMAL_64, u:decimal(5,0)/DECIMAL_64, v:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [timestamp]
+ scratchColumnTypeNames: [decimal(4,2), timestamp]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out b/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
index acb62df..6526abe 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
@@ -56,7 +56,7 @@ STAGE PLANS:
Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:dec:decimal(10,0), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:dec:decimal(10,0)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: dec (type: decimal(10,0))
outputColumnNames: _col0
@@ -78,8 +78,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -87,7 +87,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 1
includeColumns: [0]
- dataColumns: dec:decimal(10,0)
+ dataColumns: dec:decimal(10,0)/DECIMAL_64
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Vectorization:
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/vector_decimal_5.q.out
index 0bfd12e..f3e6b72 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_5.q.out
@@ -129,6 +129,40 @@ NULL
124.00000
125.20000
200.00000
+PREHOOK: query: explain SELECT cast(key as decimal) FROM DECIMAL_5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain SELECT cast(key as decimal) FROM DECIMAL_5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: decimal_5
+ Statistics: Num rows: 38 Data size: 4072 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: CAST( key AS decimal(10,0)) (type: decimal(10,0))
+ outputColumnNames: _col0
+ Statistics: Num rows: 38 Data size: 4072 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 38 Data size: 4072 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
PREHOOK: type: QUERY
PREHOOK: Input: default@decimal_5
@@ -137,42 +171,42 @@ POSTHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
POSTHOOK: type: QUERY
POSTHOOK: Input: default@decimal_5
#### A masked pattern was here ####
--4400
+-440000000
NULL
0
0
-100
-10
-1
-0
-0
-200
-20
-2
-0
-0
-0
+10000000
+1000000
+100000
+10000
+1000
+20000000
+2000000
+200000
0
-0
-0
-0
-0
-0
-1
-2
-3
--1
--1
--1
-1
-1
-124
-125
--1255
-3
-3
-3
-1
+20000
+2000
+30000
+33000
+33300
+-30000
+-33000
+-33300
+100000
+200000
+314000
+-112000
+-112000
+-112200
+112000
+112200
+12400000
+12520000
+-125549000
+314000
+314000
+314000
+100000
NULL
NULL
PREHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5
@@ -187,38 +221,38 @@ NULL
NULL
0.000
0.000
+NULL
+NULL
100.000
10.000
1.000
-0.100
-0.010
+NULL
+NULL
200.000
-20.000
-2.000
0.000
-0.200
-0.020
-0.300
-0.330
-0.333
--0.300
--0.330
--0.333
-1.000
+20.000
2.000
-3.140
--1.120
--1.120
--1.122
-1.120
-1.122
-124.000
-125.200
-NULL
-3.140
-3.140
-3.140
-1.000
+30.000
+33.000
+33.300
+-30.000
+-33.000
+-33.300
+100.000
+200.000
+314.000
+-112.000
+-112.000
+-112.200
+112.000
+112.200
+NULL
+NULL
+NULL
+314.000
+314.000
+314.000
+100.000
NULL
NULL
PREHOOK: query: DROP TABLE DECIMAL_5_txt
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
index 2bc955a..445896b 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
@@ -129,7 +129,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 2572 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: decimal(10,5)), value (type: int)
outputColumnNames: _col0, _col1
@@ -151,8 +151,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -160,7 +160,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(10,5), value:int
+ dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Vectorization:
@@ -244,7 +244,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 3020 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(17,4), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(17,4)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: decimal(17,4)), value (type: int)
outputColumnNames: _col0, _col1
@@ -266,8 +266,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -275,7 +275,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(17,4), value:int
+ dataColumns: key:decimal(17,4)/DECIMAL_64, value:int
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Vectorization:
@@ -511,7 +511,7 @@ STAGE PLANS:
Statistics: Num rows: 27 Data size: 2572 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: (key + 5.5) (type: decimal(11,5)), (value * 11) (type: int)
outputColumnNames: _col0, _col1
@@ -519,7 +519,7 @@ STAGE PLANS:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [3, 4]
- selectExpressions: DecimalColAddDecimalScalar(col 0:decimal(10,5), val 5.5) -> 3:decimal(11,5), LongColMultiplyLongScalar(col 1:int, val 11) -> 4:int
+ selectExpressions: Decimal64ColAddDecimal64Scalar(col 0:decimal(10,5)/DECIMAL_64, decimal64Val 550000, decimalVal 5.5) -> 3:decimal(11,5)/DECIMAL_64, LongColMultiplyLongScalar(col 1:int, val 11) -> 4:int
Statistics: Num rows: 27 Data size: 2572 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col1 (type: int)
@@ -535,8 +535,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -544,9 +544,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: key:decimal(10,5), value:int
+ dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,5), bigint]
+ scratchColumnTypeNames: [decimal(11,5)/DECIMAL_64, bigint]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
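For context on the /DECIMAL_64 annotations and the decimal64Val literals above (for example decimal64Val 550000 next to decimalVal 5.5 in Decimal64ColAddDecimal64Scalar): with this feature a decimal of precision 18 or less travels as an unscaled long, i.e. the value multiplied by 10^scale. A minimal Java sketch of that encoding follows; the helper names are hypothetical and this is not Hive's reader code.

import java.math.BigDecimal;

// Illustration only: how a small-precision decimal can be carried as a scaled long.
// Decimal64Demo, toScaledLong and fromScaledLong are hypothetical names, not Hive APIs.
public class Decimal64Demo {
    // Encode a decimal as an unscaled long, e.g. 5.5 at scale 5 -> 550000.
    static long toScaledLong(BigDecimal value, int scale) {
        return value.setScale(scale).unscaledValue().longValueExact();
    }

    // Decode the unscaled long back into a decimal of the declared scale.
    static BigDecimal fromScaledLong(long scaled, int scale) {
        return BigDecimal.valueOf(scaled, scale);
    }

    public static void main(String[] args) {
        long enc = toScaledLong(new BigDecimal("5.5"), 5);
        System.out.println(enc);                    // 550000
        System.out.println(fromScaledLong(enc, 5)); // 5.50000
    }
}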
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
index 04c534e..b9f4444 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
@@ -103,8 +103,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -245,8 +245,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
index 0850648..1c9ca38 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
@@ -56,8 +56,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index c296c30..674d3f7 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -86,8 +86,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -199,12 +199,12 @@ STAGE PLANS:
Statistics: Num rows: 12288 Data size: 2127808 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(10,3), 2:cdecimal2:decimal(7,2), 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:cdouble:double, 1:cdecimal1:decimal(10,3)/DECIMAL_64, 2:cdecimal2:decimal(7,2)/DECIMAL_64, 3:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterExprAndExpr(children: FilterDecimalColGreaterDecimalScalar(col 1:decimal(10,3), val 0), FilterDecimalColLessDecimalScalar(col 1:decimal(10,3), val 12345.5678), FilterDecimalColNotEqualDecimalScalar(col 2:decimal(7,2), val 0), FilterDecimalColGreaterDecimalScalar(col 2:decimal(7,2), val 1000), SelectColumnIsNotNull(col 0:double))
+ predicateExpression: FilterExprAndExpr(children: FilterDecimal64ColGreaterDecimal64Scalar(col 1:decimal(10,3)/DECIMAL_64, val 0), FilterDecimalColLessDecimalScalar(col 4:decimal(10,3), val 12345.5678)(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)), FilterDecimal64ColNotEqualDecimal64Scalar(col 2:decimal(7,2)/DECIMAL_64, val 0), FilterDecimal64ColGreaterDecimal64Scalar(col 2:decimal(7,2)/DECIMAL_64, val 100000), SelectColumnIsNotNull(col 0:double))
predicate: ((cdecimal1 < 12345.5678) and (cdecimal1 > 0) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
Statistics: Num rows: 455 Data size: 78788 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -213,8 +213,8 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
- selectExpressions: DecimalColAddDecimalColumn(col 1:decimal(10,3), col 2:decimal(7,2)) -> 4:decimal(11,3), DecimalColSubtractDecimalColumn(col 1:decimal(10,3), col 5:decimal(9,2))(children: DecimalScalarMultiplyDecimalColumn(val 2, col 2:decimal(7,2)) -> 5:decimal(9,2)) -> 6:decimal(11,3), DecimalColDivideDecimalColumn(col 7:decimal(11,3), col 2:decimal(7,2))(children: DecimalColAddDecimalScalar(col 1:decimal(10,3), val 2.34) -> 7:decimal(11,3)) -> 8:decimal(21,11), DecimalColMultiplyDecimalColumn(col 1:decimal(10,3), col 9:decimal(12,6))(children: DecimalColDivideDecimalScalar(col 2:decimal(7,2), val 3.4) -> 9:decimal(12,6)) -> 10:decimal(23,9), DecimalColModuloDecimalScalar(col 1:decimal(10,3), val 10) -> 11:decimal(5,3), CastDecimalToLong(col 1:decimal(10,3)) -> 12:int, CastDecimalToLong(col 2:decimal(7,2)) -> 13:smallint, CastDecimalToLong(col 2:decimal(7,2)) -> 14:tinyint, CastDecimalToLong(col 1:decimal(10,3)) -> 15:bigint, CastDecimalToBoolean(col 1:decimal(10,3)) -> 16:boolean, CastDecimalToDouble(col 2:decimal(7,2)) -> 17:double, CastDecimalToFloat(col 1:decimal(10,3)) -> 18:float, CastDecimalToString(col 2:decimal(7,2)) -> 19:string, CastDecimalToTimestamp(col 1:decimal(10,3)) -> 20:timestamp
+ projectedOutputColumnNums: [6, 8, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
+ selectExpressions: DecimalColAddDecimalColumn(col 4:decimal(10,3), col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3), ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 6:decimal(11,3), DecimalColSubtractDecimalColumn(col 4:decimal(10,3), col 7:decimal(9,2))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3), DecimalScalarMultiplyDecimalColumn(val 2, col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 7:decimal(9,2)) -> 8:decimal(11,3), DecimalColDivideDecimalColumn(col 23:decimal(11,3), col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 9:decimal(11,3)/DECIMAL_64)(children: Decimal64ColAddDecimal64Scalar(col 1:decimal(10,3)/DECIMAL_64, decimal64Val 2340, decimalVal 2.34) -> 9:decimal(11,3)/DECIMAL_64) -> 23:decimal(11,3), ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 10:decimal(21,11), DecimalColMultiplyDecimalColumn(col 4:decimal(10,3), col 11:decimal(12,6))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3), DecimalColDivideDecimalScalar(col 5:decimal(7,2), val 3.4)(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 11:decimal(12,6)) -> 12:decimal(23,9), DecimalColModuloDecimalScalar(col 4:decimal(10,3), val 10)(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 13:decimal(5,3), CastDecimalToLong(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 14:int, CastDecimalToLong(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 15:smallint, CastDecimalToLong(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 16:tinyint, CastDecimalToLong(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 17:bigint, CastDecimalToBoolean(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 18:boolean, CastDecimalToDouble(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 19:double, CastDecimalToFloat(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 20:float, CastDecimalToString(col 5:decimal(7,2))(children: ConvertDecimal64ToDecimal(col 2:decimal(7,2)/DECIMAL_64) -> 5:decimal(7,2)) -> 21:string, CastDecimalToTimestamp(col 4:decimal(10,3))(children: ConvertDecimal64ToDecimal(col 1:decimal(10,3)/DECIMAL_64) -> 4:decimal(10,3)) -> 22:timestamp
Statistics: Num rows: 455 Data size: 78788 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: decimal(11,3)), _col1 (type: decimal(11,3)), _col2 (type: decimal(21,11)), _col3 (type: decimal(23,9)), _col4 (type: decimal(5,3)), _col5 (type: int), _col6 (type: smallint), _col7 (type: tinyint), _col8 (type: bigint), _col9 (type: boolean), _col10 (type: double), _col11 (type: float), _col12 (type: string), _col13 (type: timestamp)
@@ -230,8 +230,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -239,9 +239,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 3
includeColumns: [0, 1, 2]
- dataColumns: cdouble:double, cdecimal1:decimal(10,3), cdecimal2:decimal(7,2)
+ dataColumns: cdouble:double, cdecimal1:decimal(10,3)/DECIMAL_64, cdecimal2:decimal(7,2)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [decimal(11,3), decimal(9,2), decimal(11,3), decimal(11,3), decimal(21,11), decimal(12,6), decimal(23,9), decimal(5,3), bigint, bigint, bigint, bigint, bigint, double, double, string, timestamp]
+ scratchColumnTypeNames: [decimal(10,3), decimal(7,2), decimal(11,3), decimal(9,2), decimal(11,3), decimal(11,3)/DECIMAL_64, decimal(21,11), decimal(12,6), decimal(23,9), decimal(5,3), bigint, bigint, bigint, bigint, bigint, double, double, string, timestamp, decimal(11,3)]
Reduce Vectorization:
enabled: false
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
@@ -302,4 +302,4 @@ ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14) q
POSTHOOK: type: QUERY
POSTHOOK: Input: default@decimal_test_small_n0
#### A masked pattern was here ####
-774841630076
+1273824888155
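In the selectExpressions above, operators that do not have a Decimal64-native form get their inputs wrapped in ConvertDecimal64ToDecimal, which widens the scaled long into a regular decimal in a scratch column before the ordinary decimal expression runs; that is why extra decimal(10,3) and decimal(7,2) entries appear in scratchColumnTypeNames. The sketch below illustrates that widening step with plain arrays and hypothetical names rather than Hive's actual column vector classes.

import java.math.BigDecimal;

// Rough sketch of a "convert decimal64 to decimal" step over one column batch.
// Plain arrays stand in for Hive's column vectors; all names are hypothetical.
public class ConvertDecimal64Sketch {
    // Widen a batch of scaled longs (precision <= 18) into BigDecimal values
    // so an expression without a Decimal64 form can operate on them.
    static BigDecimal[] widen(long[] scaled, int scale, int batchSize) {
        BigDecimal[] out = new BigDecimal[batchSize];
        for (int i = 0; i < batchSize; i++) {
            out[i] = BigDecimal.valueOf(scaled[i], scale);
        }
        return out;
    }

    public static void main(String[] args) {
        long[] col = {12345, -500, 0};            // decimal(10,3) values 12.345, -0.500, 0.000
        BigDecimal[] widened = widen(col, 3, col.length);
        for (BigDecimal d : widened) {
            System.out.println(d);
        }
    }
}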
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
index 36e8810..bf3cf93 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out
@@ -167,8 +167,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -304,6 +304,17 @@ POSTHOOK: Input: default@t2_n29
9.00 9
9.00 9
9.00 9
+PREHOOK: query: select count(*) from (select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n48
+PREHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_n48.`dec`, t2_n29.`dec` from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+106
PREHOOK: query: explain vectorization detail
select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)
PREHOOK: type: QUERY
@@ -397,8 +408,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -534,6 +545,17 @@ POSTHOOK: Input: default@t2_n29
9.00 48.96 9 5
9.00 48.96 9 7
9.00 48.96 9 7
+PREHOOK: query: select count(*) from (select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_n48
+PREHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_n48.`dec`, t1_n48.value_dec, t2_n29.`dec`, t2_n29.value_dec from t1_n48 join t2_n29 on (t1_n48.`dec`=t2_n29.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_n48
+POSTHOOK: Input: default@t2_n29
+#### A masked pattern was here ####
+106
PREHOOK: query: CREATE TABLE over1k_small(t tinyint,
si smallint,
i int,
@@ -735,6 +757,17 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 89
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -860,6 +893,17 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 15.09 89 15
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -986,6 +1030,17 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 89
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t2_small.`dec` from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
PREHOOK: query: explain vectorization detail
select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)
PREHOOK: type: QUERY
@@ -1112,3 +1167,14 @@ POSTHOOK: Input: default@t1_small
POSTHOOK: Input: default@t2_small
#### A masked pattern was here ####
89.00 15.09 89 15
+PREHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1_small
+PREHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (select t1_small.`dec`, t1_small.value_dec, t2_small.`dec`, t2_small.value_dec from t1_small join t2_small on (t1_small.`dec`=t2_small.`dec`)) as t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1_small
+POSTHOOK: Input: default@t2_small
+#### A masked pattern was here ####
+1
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
index 87596e7..417df95 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
@@ -146,8 +146,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -357,12 +357,12 @@ STAGE PLANS:
Statistics: Num rows: 12288 Data size: 2201192 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:cbigint:bigint, 1:cdouble:double, 2:cdecimal1:decimal(12,4), 3:cdecimal2:decimal(14,8), 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:cbigint:bigint, 1:cdouble:double, 2:cdecimal1:decimal(12,4)/DECIMAL_64, 3:cdecimal2:decimal(14,8)/DECIMAL_64, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 7:double, val -1.0)(children: FuncSinDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 6:double) -> 7:double))
+ predicateExpression: FilterExprAndExpr(children: FilterLongColEqualLongScalar(col 5:bigint, val 0)(children: LongColModuloLongScalar(col 0:bigint, val 500) -> 5:bigint), FilterDoubleColGreaterEqualDoubleScalar(col 8:double, val -1.0)(children: FuncSinDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 7:double) -> 8:double))
predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= -1.0D)) (type: boolean)
Statistics: Num rows: 2048 Data size: 366865 Basic stats: COMPLETE Column stats: NONE
Select Operator
@@ -371,8 +371,8 @@ STAGE PLANS:
Select Vectorization:
className: VectorSelectOperator
native: true
- projectedOutputColumnNums: [2, 8, 9, 10, 11, 6, 12, 13, 14, 16, 17, 7, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 2, 29, 5, 30]
- selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 2:decimal(12,4), decimalPlaces 2) -> 8:decimal(11,2), FuncRoundDecimalToDecimal(col 2:decimal(12,4)) -> 9:decimal(9,0), FuncFloorDecimalToDecimal(col 2:decimal(12,4)) -> 10:decimal(9,0), FuncCeilDecimalToDecimal(col 2:decimal(12,4)) -> 11:decimal(9,0), RoundWithNumDigitsDoubleToDouble(col 7, decimalPlaces 58)(children: FuncExpDoubleToDouble(col 6:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 6:double) -> 7:double) -> 6:double, FuncLnDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 12:double, FuncLog10DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 13:double, FuncLog2DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 14:double, FuncLog2DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 15:decimal(13,4))(children: DecimalColSubtractDecimalScalar(col 2:decimal(12,4), val 15601) -> 15:decimal(13,4)) -> 7:double) -> 16:double, FuncLogWithBaseDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 17:double, FuncPowerDoubleToDouble(col 18:double)(children: FuncLog2DoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 7:double) -> 18:double) -> 7:double, FuncPowerDoubleToDouble(col 19:double)(children: FuncLog2DoubleToDouble(col 18:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 18:double) -> 19:double) -> 18:double, FuncSqrtDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 20:double, FuncAbsDecimalToDecimal(col 2:decimal(12,4)) -> 21:decimal(12,4), FuncSinDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 22:double, FuncASinDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 23:double, FuncCosDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 24:double, FuncACosDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 25:double, FuncATanDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 26:double, FuncDegreesDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 27:double, FuncRadiansDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 28:double, FuncNegateDecimalToDecimal(col 2:decimal(12,4)) -> 29:decimal(12,4), FuncSignDecimalToLong(col 2:decimal(12,4)) -> 5:int, FuncCosDoubleToDouble(col 19:double)(children: DoubleColAddDoubleScalar(col 30:double, val 3.14159)(children: DoubleColUnaryMinus(col 19:double)(children: FuncSinDoubleToDouble(col 30:double)(children: FuncLnDoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 2:decimal(12,4)) -> 19:double) -> 30:double) -> 19:double) -> 30:double) -> 19:double) -> 30:double
+ projectedOutputColumnNums: [2, 9, 10, 11, 12, 7, 13, 14, 15, 17, 18, 8, 19, 21, 22, 23, 24, 25, 26, 27, 28, 29, 2, 30, 5, 31]
+ selectExpressions: FuncRoundWithNumDigitsDecimalToDecimal(col 6:decimal(12,4), decimalPlaces 2)(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 9:decimal(11,2), FuncRoundDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 10:decimal(9,0), FuncFloorDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 11:decimal(9,0), FuncCeilDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 12:decimal(9,0), RoundWithNumDigitsDoubleToDouble(col 8, decimalPlaces 58)(children: FuncExpDoubleToDouble(col 7:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 7:double) -> 8:double) -> 7:double, FuncLnDoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 13:double, FuncLog10DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 14:double, FuncLog2DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 15:double, FuncLog2DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 32:decimal(13,4))(children: ConvertDecimal64ToDecimal(col 16:decimal(13,4)/DECIMAL_64)(children: Decimal64ColSubtractDecimal64Scalar(col 2:decimal(12,4)/DECIMAL_64, decimal64Val 156010000, decimalVal 15601) -> 16:decimal(13,4)/DECIMAL_64) -> 32:decimal(13,4)) -> 8:double) -> 17:double, FuncLogWithBaseDoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 18:double, FuncPowerDoubleToDouble(col 19:double)(children: FuncLog2DoubleToDouble(col 8:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 8:double) -> 19:double) -> 8:double, FuncPowerDoubleToDouble(col 20:double)(children: FuncLog2DoubleToDouble(col 19:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 19:double) -> 20:double) -> 19:double, FuncSqrtDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 21:double, FuncAbsDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 22:decimal(12,4), FuncSinDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 23:double, FuncASinDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 24:double, FuncCosDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 25:double, FuncACosDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 26:double, FuncATanDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 27:double, FuncDegreesDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 28:double, FuncRadiansDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 29:double, FuncNegateDecimalToDecimal(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 30:decimal(12,4), FuncSignDecimalToLong(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 5:int, FuncCosDoubleToDouble(col 20:double)(children: DoubleColAddDoubleScalar(col 31:double, val 3.14159)(children: DoubleColUnaryMinus(col 20:double)(children: FuncSinDoubleToDouble(col 31:double)(children: FuncLnDoubleToDouble(col 20:double)(children: CastDecimalToDouble(col 6:decimal(12,4))(children: ConvertDecimal64ToDecimal(col 2:decimal(12,4)/DECIMAL_64) -> 6:decimal(12,4)) -> 20:double) -> 31:double) -> 20:double) -> 31:double) -> 20:double) -> 31:double
Statistics: Num rows: 2048 Data size: 366865 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
@@ -388,8 +388,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -397,9 +397,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 4
includeColumns: [0, 2]
- dataColumns: cbigint:bigint, cdouble:double, cdecimal1:decimal(12,4), cdecimal2:decimal(14,8)
+ dataColumns: cbigint:bigint, cdouble:double, cdecimal1:decimal(12,4)/DECIMAL_64, cdecimal2:decimal(14,8)/DECIMAL_64
partitionColumnCount: 0
- scratchColumnTypeNames: [bigint, double, double, decimal(11,2), decimal(9,0), decimal(9,0), decimal(9,0), double, double, double, decimal(13,4), double, double, double, double, double, decimal(12,4), double, double, double, double, double, double, double, decimal(12,4), double]
+ scratchColumnTypeNames: [bigint, decimal(12,4), double, double, decimal(11,2), decimal(9,0), decimal(9,0), decimal(9,0), double, double, double, decimal(13,4)/DECIMAL_64, double, double, double, double, double, decimal(12,4), double, double, double, double, double, double, double, decimal(12,4), double, decimal(13,4)]
Stage: Stage-0
Fetch Operator
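The plan changes above are the visible effect of the Decimal64 reader: short-precision decimal columns are tagged decimal(p,s)/DECIMAL_64 in the vectorization schema, arithmetic that can stay in the 64-bit form uses Decimal64 expressions (for example Decimal64ColSubtractDecimal64Scalar), and expressions that still need HiveDecimal get an explicit ConvertDecimal64ToDecimal step, which is also why the scratch column list grows. A rough way to reproduce such a plan, sketched on a hypothetical table and assuming hive.vectorized.input.format.supports.enabled is the property that gates the feature (that name is recalled from the vectorizer configuration and may differ on older branches):

set hive.vectorized.execution.enabled=true;
set hive.vectorized.input.format.supports.enabled=decimal_64;  -- assumed knob; an empty value should fall back to the old path

create table decimal64_demo (cbigint bigint, cdecimal1 decimal(12,4)) stored as orc;

explain vectorization detail
select round(cdecimal1, 2), cdecimal1 - 15601
from decimal64_demo
where (cbigint % 500) = 0;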
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
index fd6d9c3..e3e354f 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
@@ -604,8 +604,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
index ad8aef0..b8ea5cf 100644
--- a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
@@ -80,11 +80,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd_staging_n1
-PREHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n7 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n7 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@staging_n7
PREHOOK: Output: default@orc_ppd_staging_n1
-POSTHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n7 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n7 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@staging_n7
POSTHOOK: Output: default@orc_ppd_staging_n1
@@ -177,11 +179,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd_n2
-PREHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n1 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n1 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_staging_n1
PREHOOK: Output: default@orc_ppd_n2
-POSTHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n1 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n1 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_ppd_staging_n1
POSTHOOK: Output: default@orc_ppd_n2
@@ -203,7 +207,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n2
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 16676
+ HDFS_BYTES_READ: 16675
HDFS_BYTES_WRITTEN: 104
HDFS_READ_OPS: 7
HDFS_LARGE_READ_OPS: 0
@@ -895,7 +899,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n2
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 5691
+ HDFS_BYTES_READ: 5911
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -916,9 +920,9 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 1310720
- ALLOCATED_USED_BYTES: 13796
+ ALLOCATED_USED_BYTES: 13810
CACHE_HIT_BYTES: 24
- CACHE_MISS_BYTES: 5691
+ CACHE_MISS_BYTES: 5911
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
NUM_VECTOR_BATCHES: 1
@@ -955,7 +959,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -993,7 +997,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1053,7 +1057,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2100
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1091,7 +1095,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1126,7 +1130,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1164,7 +1168,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1202,7 +1206,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1228,7 +1232,7 @@ Stage-1 FILE SYSTEM COUNTERS:
Stage-1 HIVE COUNTERS:
CREATED_FILES: 1
DESERIALIZE_ERRORS: 0
- RECORDS_IN_Map_1: 2000
+ RECORDS_IN_Map_1: 2100
RECORDS_OUT_0: 1
RECORDS_OUT_INTERMEDIATE_Map_1: 81
RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
@@ -1238,15 +1242,15 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_MAP_0: 0
RECORDS_OUT_OPERATOR_RS_3: 81
RECORDS_OUT_OPERATOR_SEL_2: 81
- RECORDS_OUT_OPERATOR_TS_0: 2000
+ RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
- NUM_DECODED_BATCHES: 2
- NUM_VECTOR_BATCHES: 2
- ROWS_EMITTED: 2000
- SELECTED_ROWGROUPS: 2
+ NUM_DECODED_BATCHES: 3
+ NUM_VECTOR_BATCHES: 3
+ ROWS_EMITTED: 2100
+ SELECTED_ROWGROUPS: 3
Stage-1 INPUT COUNTERS:
GROUPED_INPUT_SPLITS_Map_1: 1
INPUT_DIRECTORIES_Map_1: 1
@@ -1266,7 +1270,7 @@ Stage-1 FILE SYSTEM COUNTERS:
Stage-1 HIVE COUNTERS:
CREATED_FILES: 1
DESERIALIZE_ERRORS: 0
- RECORDS_IN_Map_1: 2000
+ RECORDS_IN_Map_1: 2100
RECORDS_OUT_0: 1
RECORDS_OUT_INTERMEDIATE_Map_1: 74
RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
@@ -1276,15 +1280,15 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_MAP_0: 0
RECORDS_OUT_OPERATOR_RS_3: 74
RECORDS_OUT_OPERATOR_SEL_2: 74
- RECORDS_OUT_OPERATOR_TS_0: 2000
+ RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
- NUM_DECODED_BATCHES: 2
- NUM_VECTOR_BATCHES: 2
- ROWS_EMITTED: 2000
- SELECTED_ROWGROUPS: 2
+ NUM_DECODED_BATCHES: 3
+ NUM_VECTOR_BATCHES: 3
+ ROWS_EMITTED: 2100
+ SELECTED_ROWGROUPS: 3
Stage-1 INPUT COUNTERS:
GROUPED_INPUT_SPLITS_Map_1: 1
INPUT_DIRECTORIES_Map_1: 1
@@ -1316,7 +1320,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 12
RECORDS_OUT_OPERATOR_TS_0: 2000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1354,7 +1358,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 13
RECORDS_OUT_OPERATOR_TS_0: 2000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1392,7 +1396,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 1
RECORDS_OUT_OPERATOR_TS_0: 100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1430,7 +1434,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 7
RECORDS_OUT_OPERATOR_TS_0: 1100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1468,7 +1472,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1503,7 +1507,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1538,7 +1542,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 0
RECORDS_OUT_OPERATOR_TS_0: 0
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 1735
+ CACHE_HIT_BYTES: 1726
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
SELECTED_ROWGROUPS: 0
@@ -1573,7 +1577,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1611,7 +1615,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 1100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 2
@@ -1649,7 +1653,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 1000
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 6770
+ CACHE_HIT_BYTES: 6990
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1687,7 +1691,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 6770
+ CACHE_HIT_BYTES: 6990
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1705,7 +1709,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n2
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 4912
+ HDFS_BYTES_READ: 4896
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1728,7 +1732,7 @@ Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 524288
ALLOCATED_USED_BYTES: 8527
CACHE_HIT_BYTES: 24
- CACHE_MISS_BYTES: 4912
+ CACHE_MISS_BYTES: 4896
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
NUM_VECTOR_BATCHES: 3
@@ -1745,7 +1749,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n2
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 1751
+ HDFS_BYTES_READ: 1750
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1767,8 +1771,8 @@ Stage-1 HIVE COUNTERS:
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 262144
ALLOCATED_USED_BYTES: 2376
- CACHE_HIT_BYTES: 4936
- CACHE_MISS_BYTES: 1751
+ CACHE_HIT_BYTES: 4920
+ CACHE_MISS_BYTES: 1750
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
NUM_VECTOR_BATCHES: 1
@@ -1805,7 +1809,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 4936
+ CACHE_HIT_BYTES: 4920
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1843,7 +1847,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 2
RECORDS_OUT_OPERATOR_TS_0: 100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 6687
+ CACHE_HIT_BYTES: 6670
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
@@ -1862,7 +1866,7 @@ PREHOOK: Input: default@orc_ppd_staging_n1
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_ppd_1
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 10129
+ HDFS_BYTES_READ: 10569
HDFS_BYTES_WRITTEN: 1467
HDFS_READ_OPS: 6
HDFS_LARGE_READ_OPS: 0
@@ -1881,9 +1885,9 @@ Stage-1 HIVE COUNTERS:
TOTAL_TABLE_ROWS_WRITTEN: 2
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 2359296
- ALLOCATED_USED_BYTES: 44166
- CACHE_HIT_BYTES: 30884
- CACHE_MISS_BYTES: 10129
+ ALLOCATED_USED_BYTES: 44198
+ CACHE_HIT_BYTES: 30613
+ CACHE_MISS_BYTES: 10569
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
NUM_VECTOR_BATCHES: 1
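Two kinds of change run through the orc_ppd results above. The insert statements now order by every projected column instead of just t, s, presumably so that rows that tie on (t, s) land in a deterministic order and the resulting ORC row groups, and the PPD counters derived from them, stay stable across runs. The counter lines themselves (HDFS_BYTES_READ, CACHE_HIT_BYTES, ALLOCATED_USED_BYTES, row-group counts) are simply the regenerated golden values for the new file layout and decimal encoding. A sketch of the deterministic-ordering pattern, with hypothetical table names:

insert overwrite table orc_ppd_demo
select t, si, i, b, f, d, bo, s,
       cast(s as char(50)) as c, cast(s as varchar(50)) as v,
       cast(ts as date) as da, `dec`, bin
from staging_demo
-- ordering by the full select list removes ties, so repeated runs produce the same row layout
order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin;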
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
index 433c2c3..45586be 100644
--- a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
@@ -80,11 +80,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd_staging_n2
-PREHOOK: query: insert overwrite table orc_ppd_staging_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n8 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_staging_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n8 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@staging_n8
PREHOOK: Output: default@orc_ppd_staging_n2
-POSTHOOK: query: insert overwrite table orc_ppd_staging_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n8 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_staging_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, cast(ts as date) as da, `dec`, bin from staging_n8 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@staging_n8
POSTHOOK: Output: default@orc_ppd_staging_n2
@@ -177,11 +179,13 @@ STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.c
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_ppd_n3
-PREHOOK: query: insert overwrite table orc_ppd_n3 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n2 order by t, s
+PREHOOK: query: insert overwrite table orc_ppd_n3 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n2 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_staging_n2
PREHOOK: Output: default@orc_ppd_n3
-POSTHOOK: query: insert overwrite table orc_ppd_n3 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n2 order by t, s
+POSTHOOK: query: insert overwrite table orc_ppd_n3 select t, si, i, b, f, d, bo, s, cast(s as char(50)) as c,
+cast(s as varchar(50)) as v, da, `dec`, bin from orc_ppd_staging_n2 order by t, si, i, b, f, d, bo, s, c, v, da, `dec`, bin
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_ppd_staging_n2
POSTHOOK: Output: default@orc_ppd_n3
@@ -203,7 +207,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 17011
+ HDFS_BYTES_READ: 17010
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 7
HDFS_LARGE_READ_OPS: 0
@@ -1071,7 +1075,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 16901
+ HDFS_BYTES_READ: 16900
HDFS_BYTES_WRITTEN: 104
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1103,7 +1107,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 17731
+ HDFS_BYTES_READ: 17730
HDFS_BYTES_WRITTEN: 104
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1135,7 +1139,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 16901
+ HDFS_BYTES_READ: 16900
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1167,7 +1171,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 17731
+ HDFS_BYTES_READ: 17730
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1199,7 +1203,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 16901
+ HDFS_BYTES_READ: 16900
HDFS_BYTES_WRITTEN: 102
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1231,7 +1235,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 17731
+ HDFS_BYTES_READ: 17730
HDFS_BYTES_WRITTEN: 102
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1263,7 +1267,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 4912
+ HDFS_BYTES_READ: 4896
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1286,7 +1290,7 @@ Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 524288
ALLOCATED_USED_BYTES: 8527
CACHE_HIT_BYTES: 24
- CACHE_MISS_BYTES: 4912
+ CACHE_MISS_BYTES: 4896
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
NUM_VECTOR_BATCHES: 3
@@ -1303,7 +1307,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 1751
+ HDFS_BYTES_READ: 1750
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1325,8 +1329,8 @@ Stage-1 HIVE COUNTERS:
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 262144
ALLOCATED_USED_BYTES: 2376
- CACHE_HIT_BYTES: 4936
- CACHE_MISS_BYTES: 1751
+ CACHE_HIT_BYTES: 4920
+ CACHE_MISS_BYTES: 1750
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
NUM_VECTOR_BATCHES: 1
@@ -1347,7 +1351,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 21461
+ HDFS_BYTES_READ: 21443
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1379,7 +1383,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 23339
+ HDFS_BYTES_READ: 23321
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1415,7 +1419,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 21461
+ HDFS_BYTES_READ: 21443
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1447,7 +1451,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 23339
+ HDFS_BYTES_READ: 23321
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1479,7 +1483,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 4099
+ HDFS_BYTES_READ: 4322
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1500,9 +1504,9 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 1048576
- ALLOCATED_USED_BYTES: 11420
+ ALLOCATED_USED_BYTES: 11434
CACHE_HIT_BYTES: 24
- CACHE_MISS_BYTES: 4099
+ CACHE_MISS_BYTES: 4322
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
NUM_VECTOR_BATCHES: 3
@@ -1519,7 +1523,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 1592
+ HDFS_BYTES_READ: 1589
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1541,8 +1545,8 @@ Stage-1 HIVE COUNTERS:
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 262144
ALLOCATED_USED_BYTES: 2376
- CACHE_HIT_BYTES: 4123
- CACHE_MISS_BYTES: 1592
+ CACHE_HIT_BYTES: 4346
+ CACHE_MISS_BYTES: 1589
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
NUM_VECTOR_BATCHES: 3
@@ -1563,7 +1567,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 20632
+ HDFS_BYTES_READ: 20860
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1595,7 +1599,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 22367
+ HDFS_BYTES_READ: 22586
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1651,7 +1655,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 4123
+ CACHE_HIT_BYTES: 4346
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1689,7 +1693,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1711,7 +1715,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 20632
+ HDFS_BYTES_READ: 20860
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1743,7 +1747,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 22367
+ HDFS_BYTES_READ: 22586
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1799,7 +1803,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 4123
+ CACHE_HIT_BYTES: 4346
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1837,7 +1841,7 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_SEL_2: 6
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
- CACHE_HIT_BYTES: 5715
+ CACHE_HIT_BYTES: 5935
CACHE_MISS_BYTES: 0
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
@@ -1859,7 +1863,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 2183
+ HDFS_BYTES_READ: 2062
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1880,9 +1884,9 @@ Stage-1 HIVE COUNTERS:
RECORDS_OUT_OPERATOR_TS_0: 2100
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 786432
- ALLOCATED_USED_BYTES: 4293
+ ALLOCATED_USED_BYTES: 4264
CACHE_HIT_BYTES: 24
- CACHE_MISS_BYTES: 2183
+ CACHE_MISS_BYTES: 2062
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 3
NUM_VECTOR_BATCHES: 3
@@ -1899,7 +1903,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 18750
+ HDFS_BYTES_READ: 18628
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
@@ -1931,7 +1935,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 1217
+ HDFS_BYTES_READ: 1215
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 4
HDFS_LARGE_READ_OPS: 0
@@ -1953,8 +1957,8 @@ Stage-1 HIVE COUNTERS:
Stage-1 LLAP IO COUNTERS:
ALLOCATED_BYTES: 262144
ALLOCATED_USED_BYTES: 2376
- CACHE_HIT_BYTES: 2207
- CACHE_MISS_BYTES: 1217
+ CACHE_HIT_BYTES: 2086
+ CACHE_MISS_BYTES: 1215
METADATA_CACHE_HIT: 2
NUM_DECODED_BATCHES: 1
NUM_VECTOR_BATCHES: 1
@@ -1971,7 +1975,7 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@orc_ppd_n3
PREHOOK: Output: hdfs://### HDFS PATH ###
Stage-1 FILE SYSTEM COUNTERS:
- HDFS_BYTES_READ: 20076
+ HDFS_BYTES_READ: 19952
HDFS_BYTES_WRITTEN: 101
HDFS_READ_OPS: 5
HDFS_LARGE_READ_OPS: 0
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out b/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out
index b26a28d..53119a6 100644
--- a/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_split_elim
-POSTHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_split_elim
@@ -158,11 +158,11 @@ POSTHOOK: Input: default@orc_split_elim
29 cat 8.0 3 1969-12-31 16:00:10
5 eat 0.8 6 1969-12-31 16:00:20
70 dog 1.8 4 1969-12-31 16:00:15
-PREHOOK: query: create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (country string, year int) stored as orc
+PREHOOK: query: create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (country string, year int) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_split_elim_part
-POSTHOOK: query: create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (country string, year int) stored as orc
+POSTHOOK: query: create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (country string, year int) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_split_elim_part
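The orc_split_elimination change is a test-side tightening: decimal1 is declared with an explicit precision and scale, decimal(38,0), rather than the bare decimal shorthand, so the column type no longer depends on what the shorthand expands to by default. A minimal sketch of the difference, on hypothetical tables:

-- bare decimal expands to Hive's default precision/scale;
-- spelling it out pins the type explicitly
create table split_elim_demo_a (decimal1 decimal) stored as orc;
create table split_elim_demo_b (decimal1 decimal(38,0)) stored as orc;
describe split_elim_demo_a;
describe split_elim_demo_b;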
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
index 4cd56f8..da69c5f 100644
--- a/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
@@ -126,8 +126,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -267,8 +267,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part_llap_io.q.out
index d907fc8..f58338f 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_part_llap_io.q.out
@@ -93,8 +93,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -235,8 +235,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -431,8 +431,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -566,8 +566,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -699,8 +699,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -823,8 +823,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -951,8 +951,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1070,8 +1070,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1191,8 +1191,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1303,8 +1303,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1438,8 +1438,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1564,8 +1564,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1676,8 +1676,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1767,8 +1767,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table.q.out
index 9319218..9dddc12 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table.q.out
@@ -92,8 +92,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -238,8 +238,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -446,8 +446,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -578,8 +578,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -707,8 +707,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -827,8 +827,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -951,8 +951,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1066,8 +1066,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1183,8 +1183,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1291,8 +1291,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1422,8 +1422,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1544,8 +1544,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1652,8 +1652,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1739,8 +1739,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table_llap_io.q.out
index 6fe97fb..0f3c600 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_acidvec_table_llap_io.q.out
@@ -93,8 +93,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -240,8 +240,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -449,8 +449,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -582,8 +582,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -712,8 +712,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -833,8 +833,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -958,8 +958,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1074,8 +1074,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1192,8 +1192,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1301,8 +1301,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1433,8 +1433,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1556,8 +1556,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1665,8 +1665,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1753,8 +1753,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
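The recurring two-line change in these ORC plans - inputFormatFeatureSupport and featureSupportInUse moving from [] to [DECIMAL_64] - reflects OrcInputFormat now advertising Decimal64 support after the ORC upgrade. A minimal sketch of how such an EXPLAIN VECTORIZATION plan can be produced (the table and column names below are illustrative, not taken from this patch):

    -- hypothetical ORC table with a small-precision decimal column
    CREATE TABLE dec64_demo (d decimal(8,1), i int) STORED AS ORC;
    SET hive.vectorized.execution.enabled=true;
    -- decimal_64 is expected to be the default entry in this list; shown here for clarity
    SET hive.vectorized.input.format.supports.enabled=decimal_64;
    EXPLAIN VECTORIZATION DETAIL
    SELECT d, i FROM dec64_demo WHERE d IS NOT NULL;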
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
index 131d0fa..69167ef 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part.q.out
@@ -107,8 +107,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -266,8 +266,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -479,8 +479,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -631,8 +631,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -852,8 +852,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1063,8 +1063,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1260,8 +1260,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1485,8 +1485,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1652,8 +1652,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
index bff87f9..78b5231 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
@@ -181,8 +181,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -485,8 +485,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -715,8 +715,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex_llap_io.q.out
index 06ae325..c786684 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex_llap_io.q.out
@@ -182,8 +182,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -487,8 +487,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -718,8 +718,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
index 966ab8f..c835afd 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
@@ -292,7 +292,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -543,7 +544,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -716,7 +718,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -873,7 +876,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -1111,7 +1115,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out
index be57603..9c2460f 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive_llap_io.q.out
@@ -293,7 +293,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -545,7 +546,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -719,7 +721,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -877,7 +880,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
@@ -1116,7 +1120,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
featureSupportInUse: []
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
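In the two schema-evolution "all primitive" files above, inputFormatFeatureSupport now reports [DECIMAL_64] but featureSupportInUse stays [], and the new vectorizationSupportRemovedReasons line states that the feature is not in hive.vectorized.input.format.supports.enabled - these tests appear to clear that list. A rough sketch of the setting that produces the removed-reason line (table name is hypothetical):

    -- clearing the supported-feature list drops DECIMAL_64 from featureSupportInUse
    SET hive.vectorized.input.format.supports.enabled=;
    EXPLAIN VECTORIZATION DETAIL
    SELECT insert_num, part FROM some_orc_part_table;  -- hypothetical partitioned ORC table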
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
index ef65472..6973081 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
@@ -107,8 +107,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -271,8 +271,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -497,8 +497,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -794,8 +794,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1000,8 +1000,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out
index 5118f2d..51d72d7 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table_llap_io.q.out
@@ -108,8 +108,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -273,8 +273,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -500,8 +500,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -798,8 +798,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1005,8 +1005,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex_llap_io.q.out
index 5052fe6..ca2bfb7 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex_llap_io.q.out
@@ -92,8 +92,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -336,8 +335,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -497,8 +495,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -745,8 +742,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out
index df136bf..8765301 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive_llap_io.q.out
@@ -182,8 +182,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -513,8 +512,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -775,8 +773,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -992,8 +989,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1198,7 +1194,7 @@ STAGE PLANS:
Statistics: Num rows: 5 Data size: 3595 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:insert_num:int, 1:c1:char(12), 2:c2:char(25), 3:c3:varchar(25), 4:c4:varchar(10), 5:c5:decimal(12,4), 6:c6:decimal(20,10), 7:b:string, 8:part:int, 9:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:insert_num:int, 1:c1:char(12), 2:c2:char(25), 3:c3:varchar(25), 4:c4:varchar(10), 5:c5:decimal(12,4)/DECIMAL_64, 6:c6:decimal(20,10), 7:b:string, 8:part:int, 9:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: insert_num (type: int), part (type: int), c1 (type: char(12)), c2 (type: char(25)), c3 (type: varchar(25)), c4 (type: varchar(10)), c5 (type: decimal(12,4)), c6 (type: decimal(20,10)), b (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
@@ -1223,8 +1219,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1232,7 +1227,7 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 8
includeColumns: [0, 1, 2, 3, 4, 5, 6, 7]
- dataColumns: insert_num:int, c1:char(12), c2:char(25), c3:varchar(25), c4:varchar(10), c5:decimal(12,4), c6:decimal(20,10), b:string
+ dataColumns: insert_num:int, c1:char(12), c2:char(25), c3:varchar(25), c4:varchar(10), c5:decimal(12,4)/DECIMAL_64, c6:decimal(20,10), b:string
partitionColumnCount: 1
partitionColumns: part:int
scratchColumnTypeNames: []
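In the text/LLAP schema-evolution files above, the previously emitted vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled] line disappears and featureSupportInUse becomes [DECIMAL_64], i.e. the vector-serde-deserialize path keeps the Decimal64 form under LLAP after this change; the schema and rowBatchContext lines accordingly tag the small-precision column as decimal(12,4)/DECIMAL_64 while decimal(20,10) is left as a regular decimal. A minimal sketch, assuming a TEXTFILE table read through LLAP IO (names are illustrative):

    SET hive.llap.io.enabled=true;
    SET hive.vectorized.use.vector.serde.deserialize=true;
    EXPLAIN VECTORIZATION DETAIL
    SELECT c5, c6 FROM some_text_part_table;  -- hypothetical table; c5 decimal(12,4) maps to DECIMAL_64, c6 decimal(20,10) does not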
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
index f05e5c0..053826e 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_mapjoin3.q.out
@@ -122,12 +122,12 @@ STAGE PLANS:
Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:decimal0801_col:decimal(8,1), 1:int_col_1:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:decimal0801_col:decimal(8,1)/DECIMAL_64, 1:int_col_1:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 0:decimal(8,1)), SelectColumnIsNotNull(col 1:int))
+ predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 3:decimal(8,1))(children: ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)), SelectColumnIsNotNull(col 1:int))
predicate: (decimal0801_col is not null and int_col_1 is not null) (type: boolean)
Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -146,12 +146,13 @@ STAGE PLANS:
1 _col0 (type: int)
Map Join Vectorization:
bigTableKeyColumnNums: [1]
- bigTableRetainedColumnNums: [0]
- bigTableValueColumnNums: [0]
+ bigTableRetainedColumnNums: [3]
+ bigTableValueColumnNums: [3]
+ bigTableValueExpressions: ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)
className: VectorMapJoinInnerBigOnlyLongOperator
native: true
nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
- projectedOutputColumnNums: [0]
+ projectedOutputColumnNums: [3]
outputColumnNames: _col0
input vertices:
1 Reducer 3
@@ -171,8 +172,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -180,9 +181,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: decimal0801_col:decimal(8,1), int_col_1:int
+ dataColumns: decimal0801_col:decimal(8,1)/DECIMAL_64, int_col_1:int
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(8,1)]
Map 2
Map Operator Tree:
TableScan
@@ -226,8 +227,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -325,12 +326,12 @@ STAGE PLANS:
Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:decimal0801_col:decimal(8,1), 1:int_col_1:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:decimal0801_col:decimal(8,1)/DECIMAL_64, 1:int_col_1:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 1:int), SelectColumnIsNotNull(col 0:decimal(8,1)))
+ predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 1:int), SelectColumnIsNotNull(col 3:decimal(8,1))(children: ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)))
predicate: (decimal0801_col is not null and int_col_1 is not null) (type: boolean)
Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -349,12 +350,13 @@ STAGE PLANS:
1 _col0 (type: int)
Map Join Vectorization:
bigTableKeyColumnNums: [1]
- bigTableRetainedColumnNums: [0]
- bigTableValueColumnNums: [0]
+ bigTableRetainedColumnNums: [3]
+ bigTableValueColumnNums: [3]
+ bigTableValueExpressions: ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)
className: VectorMapJoinInnerBigOnlyLongOperator
native: true
nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
- projectedOutputColumnNums: [0]
+ projectedOutputColumnNums: [3]
outputColumnNames: _col0
input vertices:
1 Reducer 3
@@ -374,8 +376,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -383,9 +385,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: decimal0801_col:decimal(8,1), int_col_1:int
+ dataColumns: decimal0801_col:decimal(8,1)/DECIMAL_64, int_col_1:int
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(8,1)]
Map 2
Map Operator Tree:
TableScan
@@ -429,8 +431,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -528,12 +530,12 @@ STAGE PLANS:
Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: COMPLETE
TableScan Vectorization:
native: true
- vectorizationSchemaColumns: [0:decimal0801_col:decimal(8,1), 1:int_col_1:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+ vectorizationSchemaColumns: [0:decimal0801_col:decimal(8,1)/DECIMAL_64, 1:int_col_1:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Filter Operator
Filter Vectorization:
className: VectorFilterOperator
native: true
- predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 0:decimal(8,1)), SelectColumnIsNotNull(col 1:int))
+ predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 3:decimal(8,1))(children: ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)), SelectColumnIsNotNull(col 1:int))
predicate: (decimal0801_col is not null and int_col_1 is not null) (type: boolean)
Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
@@ -552,12 +554,13 @@ STAGE PLANS:
1 _col0 (type: int)
Map Join Vectorization:
bigTableKeyColumnNums: [1]
- bigTableRetainedColumnNums: [0]
- bigTableValueColumnNums: [0]
+ bigTableRetainedColumnNums: [3]
+ bigTableValueColumnNums: [3]
+ bigTableValueExpressions: ConvertDecimal64ToDecimal(col 0:decimal(8,1)/DECIMAL_64) -> 3:decimal(8,1)
className: VectorMapJoinInnerBigOnlyLongOperator
native: true
nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
- projectedOutputColumnNums: [0]
+ projectedOutputColumnNums: [3]
outputColumnNames: _col0
input vertices:
1 Reducer 3
@@ -577,8 +580,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -586,9 +589,9 @@ STAGE PLANS:
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
- dataColumns: decimal0801_col:decimal(8,1), int_col_1:int
+ dataColumns: decimal0801_col:decimal(8,1)/DECIMAL_64, int_col_1:int
partitionColumnCount: 0
- scratchColumnTypeNames: []
+ scratchColumnTypeNames: [decimal(8,1)]
Map 2
Map Operator Tree:
TableScan
@@ -632,8 +635,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
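In vectorized_mapjoin3.q.out above, operators that cannot consume the Decimal64 form directly - here the IS NOT NULL filter and the map-join value side - wrap column 0 with ConvertDecimal64ToDecimal into scratch column 3, which is why bigTableRetainedColumnNums, bigTableValueColumnNums and projectedOutputColumnNums move from [0] to [3] and scratchColumnTypeNames gains decimal(8,1). A rough sketch of a query shape that exercises this (the table name is hypothetical; the column names match the plan above):

    CREATE TABLE dec64_join_src (decimal0801_col decimal(8,1), int_col_1 int) STORED AS ORC;
    SET hive.vectorized.execution.enabled=true;
    SET hive.vectorized.execution.mapjoin.native.enabled=true;
    EXPLAIN VECTORIZATION EXPRESSION
    SELECT t1.decimal0801_col
    FROM dec64_join_src t1
    JOIN (SELECT MIN(int_col_1) AS m FROM dec64_join_src) t2
      ON t1.int_col_1 = t2.m;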
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
index 35786eb..57b5845 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_math_funcs.q.out
@@ -153,8 +153,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
index d27a123..7546dbb 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_nested_mapjoin.q.out
@@ -69,8 +69,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -98,8 +98,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -126,8 +126,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
index 045a6ad..53bd3c9 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
@@ -167,8 +167,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -387,8 +387,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -430,8 +430,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -640,8 +640,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -799,8 +799,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1018,8 +1018,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1240,8 +1240,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1464,8 +1464,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1507,8 +1507,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1685,8 +1685,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -1723,8 +1723,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2307,8 +2307,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2525,8 +2525,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -2813,8 +2813,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3035,8 +3035,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3078,8 +3078,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3328,8 +3328,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -3566,8 +3566,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3829,8 +3829,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4345,8 +4345,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4663,8 +4663,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -4977,8 +4977,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5299,8 +5299,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5632,8 +5632,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -5934,8 +5934,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
index 690f0ae..68c56ed 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_shufflejoin.q.out
@@ -60,8 +60,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -102,8 +102,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
index 1f6e152..aecd67e 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_string_funcs.q.out
@@ -79,8 +79,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
index 8006a71..dfab9db 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp.q.out
@@ -166,8 +166,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -298,8 +298,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -391,8 +391,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -532,8 +532,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
index 7ce15ae..654dab9 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_funcs.q.out
@@ -285,8 +285,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -495,8 +495,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
@@ -689,8 +689,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
@@ -883,8 +883,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: true
@@ -1026,8 +1026,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1153,8 +1153,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1298,8 +1298,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
index 04cb482..82d43c1 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_timestamp_ints_casts.q.out
@@ -85,8 +85,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
@@ -249,8 +249,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: true
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out
index 22d826b..7cbcbbe 100644
--- a/ql/src/test/results/clientpositive/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/mergejoin.q.out
@@ -2982,8 +2982,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -3141,8 +3141,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_file_dump.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_file_dump.q.out b/ql/src/test/results/clientpositive/orc_file_dump.q.out
index 8ec71a9..5101312 100644
--- a/ql/src/test/results/clientpositive/orc_file_dump.q.out
+++ b/ql/src/test/results/clientpositive/orc_file_dump.q.out
@@ -111,7 +111,7 @@ Stripe Statistics:
Column 7: count: 1049 hasNull: false bytesOnDisk: 137 true: 526
Column 8: count: 1049 hasNull: false bytesOnDisk: 3430 min: max: zach zipper sum: 13443
Column 9: count: 1049 hasNull: false bytesOnDisk: 1802 min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 min UTC: 2013-03-01 01:11:58.703 max UTC: 2013-03-01 01:11:58.703
- Column 10: count: 1049 hasNull: false bytesOnDisk: 2369 min: 0 max: 99.94 sum: 53646.16
+ Column 10: count: 1049 hasNull: false bytesOnDisk: 2181 min: 0 max: 99.94 sum: 53646.16
Column 11: count: 1049 hasNull: false bytesOnDisk: 2468 sum: 13278
File Statistics:
@@ -125,11 +125,11 @@ File Statistics:
Column 7: count: 1049 hasNull: false bytesOnDisk: 137 true: 526
Column 8: count: 1049 hasNull: false bytesOnDisk: 3430 min: max: zach zipper sum: 13443
Column 9: count: 1049 hasNull: false bytesOnDisk: 1802 min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 min UTC: 2013-03-01 01:11:58.703 max UTC: 2013-03-01 01:11:58.703
- Column 10: count: 1049 hasNull: false bytesOnDisk: 2369 min: 0 max: 99.94 sum: 53646.16
+ Column 10: count: 1049 hasNull: false bytesOnDisk: 2181 min: 0 max: 99.94 sum: 53646.16
Column 11: count: 1049 hasNull: false bytesOnDisk: 2468 sum: 13278
Stripes:
- Stripe: offset: 3 data: 22593 rows: 1049 tail: 252 index: 8956
+ Stripe: offset: 3 data: 22405 rows: 1049 tail: 253 index: 8956
Stream: column 0 section ROW_INDEX start: 3 length 20
Stream: column 0 section BLOOM_FILTER_UTF8 start: 23 length 34
Stream: column 1 section ROW_INDEX start: 57 length 58
@@ -168,10 +168,10 @@ Stripes:
Stream: column 8 section DICTIONARY_DATA start: 23365 length 1548
Stream: column 9 section DATA start: 24913 length 19
Stream: column 9 section SECONDARY start: 24932 length 1783
- Stream: column 10 section DATA start: 26715 length 2138
- Stream: column 10 section SECONDARY start: 28853 length 231
- Stream: column 11 section DATA start: 29084 length 1877
- Stream: column 11 section LENGTH start: 30961 length 591
+ Stream: column 10 section DATA start: 26715 length 2166
+ Stream: column 10 section SECONDARY start: 28881 length 15
+ Stream: column 11 section DATA start: 28896 length 1877
+ Stream: column 11 section LENGTH start: 30773 length 591
Encoding column 0: DIRECT
Encoding column 1: DIRECT
Encoding column 2: DIRECT_V2
@@ -256,7 +256,7 @@ Stripes:
Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
Row group indices for column 10:
Entry 0: count: 1000 hasNull: false min: 0 max: 9994 sum: 5118211 positions: 0,0,0,0,0
- Entry 1: count: 49 hasNull: false min: 0 max: 9490 sum: 246405 positions: 0,2159,0,476,4
+ Entry 1: count: 49 hasNull: false min: 0 max: 9490 sum: 246405 positions: 0,2194,0,4,488
Bloom filters for column 10:
Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2848 loadFactor: 0.4541 expectedFpp: 0.042514365
Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 194 loadFactor: 0.0309 expectedFpp: 9.153406E-7
@@ -269,7 +269,7 @@ Stripes:
Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 98 loadFactor: 0.0156 expectedFpp: 5.9604645E-8
Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 102 loadFactor: 0.0163 expectedFpp: 6.9948186E-8
-File length: 32494 bytes
+File length: 32309 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
@@ -308,7 +308,7 @@ Stripe Statistics:
Column 7: count: 1049 hasNull: false bytesOnDisk: 137 true: 526
Column 8: count: 1049 hasNull: false bytesOnDisk: 3430 min: max: zach zipper sum: 13443
Column 9: count: 1049 hasNull: false bytesOnDisk: 1802 min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 min UTC: 2013-03-01 01:11:58.703 max UTC: 2013-03-01 01:11:58.703
- Column 10: count: 1049 hasNull: false bytesOnDisk: 2369 min: 0 max: 99.94 sum: 53646.16
+ Column 10: count: 1049 hasNull: false bytesOnDisk: 2181 min: 0 max: 99.94 sum: 53646.16
Column 11: count: 1049 hasNull: false bytesOnDisk: 2468 sum: 13278
File Statistics:
@@ -322,11 +322,11 @@ File Statistics:
Column 7: count: 1049 hasNull: false bytesOnDisk: 137 true: 526
Column 8: count: 1049 hasNull: false bytesOnDisk: 3430 min: max: zach zipper sum: 13443
Column 9: count: 1049 hasNull: false bytesOnDisk: 1802 min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 min UTC: 2013-03-01 01:11:58.703 max UTC: 2013-03-01 01:11:58.703
- Column 10: count: 1049 hasNull: false bytesOnDisk: 2369 min: 0 max: 99.94 sum: 53646.16
+ Column 10: count: 1049 hasNull: false bytesOnDisk: 2181 min: 0 max: 99.94 sum: 53646.16
Column 11: count: 1049 hasNull: false bytesOnDisk: 2468 sum: 13278
Stripes:
- Stripe: offset: 3 data: 22593 rows: 1049 tail: 250 index: 13603
+ Stripe: offset: 3 data: 22405 rows: 1049 tail: 248 index: 13603
Stream: column 0 section ROW_INDEX start: 3 length 20
Stream: column 0 section BLOOM_FILTER_UTF8 start: 23 length 43
Stream: column 1 section ROW_INDEX start: 66 length 58
@@ -365,10 +365,10 @@ Stripes:
Stream: column 8 section DICTIONARY_DATA start: 28012 length 1548
Stream: column 9 section DATA start: 29560 length 19
Stream: column 9 section SECONDARY start: 29579 length 1783
- Stream: column 10 section DATA start: 31362 length 2138
- Stream: column 10 section SECONDARY start: 33500 length 231
- Stream: column 11 section DATA start: 33731 length 1877
- Stream: column 11 section LENGTH start: 35608 length 591
+ Stream: column 10 section DATA start: 31362 length 2166
+ Stream: column 10 section SECONDARY start: 33528 length 15
+ Stream: column 11 section DATA start: 33543 length 1877
+ Stream: column 11 section LENGTH start: 35420 length 591
Encoding column 0: DIRECT
Encoding column 1: DIRECT
Encoding column 2: DIRECT_V2
@@ -453,7 +453,7 @@ Stripes:
Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 7 loadFactor: 0.0007 expectedFpp: 1.0959422E-22
Row group indices for column 10:
Entry 0: count: 1000 hasNull: false min: 0 max: 9994 sum: 5118211 positions: 0,0,0,0,0
- Entry 1: count: 49 hasNull: false min: 0 max: 9490 sum: 246405 positions: 0,2159,0,476,4
+ Entry 1: count: 49 hasNull: false min: 0 max: 9490 sum: 246405 positions: 0,2194,0,4,488
Bloom filters for column 10:
Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 4796 loadFactor: 0.4996 expectedFpp: 0.0077670407
Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 339 loadFactor: 0.0353 expectedFpp: 6.846983E-11
@@ -466,7 +466,7 @@ Stripes:
Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 174 loadFactor: 0.0181 expectedFpp: 6.426078E-13
Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 181 loadFactor: 0.0189 expectedFpp: 8.4693775E-13
-File length: 37141 bytes
+File length: 36950 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
@@ -517,7 +517,7 @@ Stripe Statistics:
Column 7: count: 1049 hasNull: false bytesOnDisk: 137 true: 526
Column 8: count: 1049 hasNull: false bytesOnDisk: 3430 min: max: zach zipper sum: 13443
Column 9: count: 1049 hasNull: false bytesOnDisk: 1802 min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 min UTC: 2013-03-01 01:11:58.703 max UTC: 2013-03-01 01:11:58.703
- Column 10: count: 1049 hasNull: false bytesOnDisk: 2369 min: 0 max: 99.94 sum: 53646.16
+ Column 10: count: 1049 hasNull: false bytesOnDisk: 2181 min: 0 max: 99.94 sum: 53646.16
Column 11: count: 1049 hasNull: false bytesOnDisk: 2468 sum: 13278
File Statistics:
@@ -531,11 +531,11 @@ File Statistics:
Column 7: count: 1049 hasNull: false bytesOnDisk: 137 true: 526
Column 8: count: 1049 hasNull: false bytesOnDisk: 3430 min: max: zach zipper sum: 13443
Column 9: count: 1049 hasNull: false bytesOnDisk: 1802 min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 min UTC: 2013-03-01 01:11:58.703 max UTC: 2013-03-01 01:11:58.703
- Column 10: count: 1049 hasNull: false bytesOnDisk: 2369 min: 0 max: 99.94 sum: 53646.16
+ Column 10: count: 1049 hasNull: false bytesOnDisk: 2181 min: 0 max: 99.94 sum: 53646.16
Column 11: count: 1049 hasNull: false bytesOnDisk: 2468 sum: 13278
Stripes:
- Stripe: offset: 3 data: 22593 rows: 1049 tail: 252 index: 8956
+ Stripe: offset: 3 data: 22405 rows: 1049 tail: 253 index: 8956
Stream: column 0 section ROW_INDEX start: 3 length 20
Stream: column 0 section BLOOM_FILTER_UTF8 start: 23 length 34
Stream: column 1 section ROW_INDEX start: 57 length 58
@@ -574,10 +574,10 @@ Stripes:
Stream: column 8 section DICTIONARY_DATA start: 23365 length 1548
Stream: column 9 section DATA start: 24913 length 19
Stream: column 9 section SECONDARY start: 24932 length 1783
- Stream: column 10 section DATA start: 26715 length 2138
- Stream: column 10 section SECONDARY start: 28853 length 231
- Stream: column 11 section DATA start: 29084 length 1877
- Stream: column 11 section LENGTH start: 30961 length 591
+ Stream: column 10 section DATA start: 26715 length 2166
+ Stream: column 10 section SECONDARY start: 28881 length 15
+ Stream: column 11 section DATA start: 28896 length 1877
+ Stream: column 11 section LENGTH start: 30773 length 591
Encoding column 0: DIRECT
Encoding column 1: DIRECT
Encoding column 2: DIRECT_V2
@@ -662,7 +662,7 @@ Stripes:
Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
Row group indices for column 10:
Entry 0: count: 1000 hasNull: false min: 0 max: 9994 sum: 5118211 positions: 0,0,0,0,0
- Entry 1: count: 49 hasNull: false min: 0 max: 9490 sum: 246405 positions: 0,2159,0,476,4
+ Entry 1: count: 49 hasNull: false min: 0 max: 9490 sum: 246405 positions: 0,2194,0,4,488
Bloom filters for column 10:
Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2848 loadFactor: 0.4541 expectedFpp: 0.042514365
Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 194 loadFactor: 0.0309 expectedFpp: 9.153406E-7
@@ -675,7 +675,7 @@ Stripes:
Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 98 loadFactor: 0.0156 expectedFpp: 5.9604645E-8
Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 102 loadFactor: 0.0163 expectedFpp: 6.9948186E-8
-File length: 32494 bytes
+File length: 32309 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
[29/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
index 3a25787..1359111 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n4
-POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n4
-PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year string, hour int) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a_n1
-POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year string, hour int) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a_n1
@@ -50,7 +50,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -62,7 +62,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a_n1
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
outputColumnNames: userid, string1, subtype, decimal1, ts, year, hour
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -76,7 +76,7 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -89,7 +89,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
+ expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -121,7 +121,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a_n1
PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
@@ -132,7 +132,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -145,7 +145,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -233,7 +233,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -245,7 +245,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a_n1
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
outputColumnNames: userid, string1, subtype, decimal1, ts, year, hour
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -259,7 +259,7 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -272,7 +272,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
+ expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -313,7 +313,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a_n1
Stage: Stage-4
@@ -352,7 +352,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -365,7 +365,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -428,7 +428,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -441,7 +441,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge7.q.out b/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
index c1e4fc6..273a5be 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n2
-POSTHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n2
-PREHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a_n0
-POSTHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: query: create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a_n0
@@ -46,7 +46,7 @@ STAGE PLANS:
alias: orc_merge5_n2
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), subtype (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -58,7 +58,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a_n0
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), _col5 (type: double)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), _col5 (type: double)
outputColumnNames: userid, string1, subtype, decimal1, ts, st
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -72,7 +72,7 @@ STAGE PLANS:
sort order: +
Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -85,7 +85,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
+ expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -116,7 +116,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a_n0
PREHOOK: query: insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2
@@ -130,22 +130,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -161,22 +161,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -263,7 +263,7 @@ STAGE PLANS:
alias: orc_merge5_n2
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), subtype (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -275,7 +275,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a_n0
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), _col5 (type: double)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), _col5 (type: double)
outputColumnNames: userid, string1, subtype, decimal1, ts, st
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -289,7 +289,7 @@ STAGE PLANS:
sort order: +
Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -302,7 +302,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
+ expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -342,7 +342,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a_n0
Stage: Stage-4
@@ -384,22 +384,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -415,22 +415,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -499,22 +499,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -530,22 +530,22 @@ POSTHOOK: Output: default@orc_merge5a_n0@st=0.8
POSTHOOK: Output: default@orc_merge5a_n0@st=1.8
POSTHOOK: Output: default@orc_merge5a_n0@st=8.0
POSTHOOK: Output: default@orc_merge5a_n0@st=80.0
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=0.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=1.8).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=8.0).userid SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).string1 SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).subtype SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n0 PARTITION(st=80.0).ts SIMPLE [(orc_merge5_n2)orc_merge5_n2.FieldSchema(name:ts, type:timestamp, comment:null), ]
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
index 307e730..676af08 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n3
-POSTHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n3
-PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5b
@@ -49,7 +49,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -61,7 +61,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5b
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp)
outputColumnNames: userid, string1, subtype, decimal1, ts
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -72,7 +72,7 @@ STAGE PLANS:
Reduce Output Operator
sort order:
Statistics: Num rows: 1 Data size: 2696 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -109,7 +109,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5b
PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
@@ -120,7 +120,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtyp
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -133,7 +133,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -146,7 +146,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -159,7 +159,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -172,7 +172,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -185,7 +185,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
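For context on the decimal(10,0) -> decimal(38,0) churn above: in Hive a bare decimal column resolves to decimal(10,0), so spelling out decimal(38,0) in the test DDL changes every type reported in the lineage entries, the statistics structs, and the Column Types lists. A minimal HiveQL sketch of the two declarations (the sketch table names are illustrative only and are not part of the test suite):

  -- a bare DECIMAL resolves to decimal(10,0)
  create table decimal_default_sketch (decimal1 decimal) stored as orc;

  -- the updated tests spell out the maximum precision explicitly
  create table decimal_explicit_sketch (decimal1 decimal(38,0)) stored as orc;

  -- DESCRIBE shows the resolved types echoed by the POSTHOOK lineage lines
  describe decimal_default_sketch;
  describe decimal_explicit_sketch;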
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
index 7be4ffa..00d0a14 100644
--- a/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5
-POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5
-PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a
-POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (st double) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a
@@ -46,7 +46,7 @@ STAGE PLANS:
alias: orc_merge5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp), subtype (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -58,7 +58,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), _col5 (type: double)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), _col5 (type: double)
outputColumnNames: userid, string1, subtype, decimal1, ts, st
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -72,7 +72,7 @@ STAGE PLANS:
sort order: +
Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
@@ -85,7 +85,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
+ expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: double)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
Statistics: Num rows: 1 Data size: 352 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -116,7 +116,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a
PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
@@ -130,22 +130,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -161,22 +161,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -192,22 +192,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -223,22 +223,22 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8
POSTHOOK: Output: default@orc_merge5a@st=1.8
POSTHOOK: Output: default@orc_merge5a@st=8.0
POSTHOOK: Output: default@orc_merge5a@st=80.0
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
-POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
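The orc_merge11.q.out changes that follow are regenerated ORC metadata dumps (type description, stripe and file statistics, stream layout, and per-column row group indices). A dump in this format can be produced with Hive's orcfiledump mode; a minimal invocation sketch, assuming an illustrative warehouse path rather than the masked one used by the test harness:

  # print file metadata plus row group indexes for columns 1-5
  hive --orcfiledump --rowindex 1,2,3,4,5 /user/hive/warehouse/orcfile_merge1_n2/000000_0

The per-row-group minimums, maximums and sums shift because the insert queries now append ORDER BY userid, so each row group holds sorted data, and the smaller bytesOnDisk values and stream lengths are consistent with the tighter encoding that sorted input allows.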
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_merge11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge11.q.out b/ql/src/test/results/clientpositive/orc_merge11.q.out
index 1b2ddd3..8e7840c 100644
--- a/ql/src/test/results/clientpositive/orc_merge11.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge11.q.out
@@ -6,11 +6,11 @@ PREHOOK: query: DROP TABLE orc_split_elim_n0
PREHOOK: type: DROPTABLE
POSTHOOK: query: DROP TABLE orc_split_elim_n0
POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_split_elim_n0
-POSTHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_split_elim_n0
@@ -30,36 +30,36 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@orc_split_elim_n0
-PREHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
+PREHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
+POSTHOOK: query: create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc tblproperties("orc.compress.size"="4096")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orcfile_merge1_n2
-PREHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0
+PREHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_split_elim_n0
PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0
+POSTHOOK: query: insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_split_elim_n0
POSTHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.string1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.subtype SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.ts SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:ts, type:timestamp, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.userid SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:userid, type:bigint, comment:null), ]
-PREHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0
+PREHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_split_elim_n0
PREHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0
+POSTHOOK: query: insert into table orcfile_merge1_n2 select * from orc_split_elim_n0 order by userid
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_split_elim_n0
POSTHOOK: Output: default@orcfile_merge1_n2
-POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orcfile_merge1_n2.decimal1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.string1 SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.subtype SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orcfile_merge1_n2.ts SIMPLE [(orc_split_elim_n0)orc_split_elim_n0.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -76,42 +76,42 @@ File Version: 0.12 with ORC_135
Rows: 50000
Compression: ZLIB
Compression size: 4096
-Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(38,0),ts:timestamp>
Stripe Statistics:
Stripe 1:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
File Statistics:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripes:
- Stripe: offset: 3 data: 5897 rows: 50000 tail: 113 index: 497
+ Stripe: offset: 3 data: 5761 rows: 50000 tail: 112 index: 433
Stream: column 0 section ROW_INDEX start: 3 length 17
- Stream: column 1 section ROW_INDEX start: 20 length 83
- Stream: column 2 section ROW_INDEX start: 103 length 81
- Stream: column 3 section ROW_INDEX start: 184 length 111
- Stream: column 4 section ROW_INDEX start: 295 length 110
- Stream: column 5 section ROW_INDEX start: 405 length 95
- Stream: column 1 section DATA start: 500 length 45
- Stream: column 2 section DATA start: 545 length 41
- Stream: column 2 section LENGTH start: 586 length 8
- Stream: column 2 section DICTIONARY_DATA start: 594 length 23
- Stream: column 3 section DATA start: 617 length 5167
- Stream: column 4 section DATA start: 5784 length 524
- Stream: column 4 section SECONDARY start: 6308 length 18
- Stream: column 5 section DATA start: 6326 length 53
- Stream: column 5 section SECONDARY start: 6379 length 18
+ Stream: column 1 section ROW_INDEX start: 20 length 73
+ Stream: column 2 section ROW_INDEX start: 93 length 79
+ Stream: column 3 section ROW_INDEX start: 172 length 85
+ Stream: column 4 section ROW_INDEX start: 257 length 92
+ Stream: column 5 section ROW_INDEX start: 349 length 87
+ Stream: column 1 section DATA start: 436 length 30
+ Stream: column 2 section DATA start: 466 length 24
+ Stream: column 2 section LENGTH start: 490 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 498 length 23
+ Stream: column 3 section DATA start: 521 length 5114
+ Stream: column 4 section DATA start: 5635 length 480
+ Stream: column 4 section SECONDARY start: 6115 length 18
+ Stream: column 5 section DATA start: 6133 length 46
+ Stream: column 5 section SECONDARY start: 6179 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -125,37 +125,37 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
-File length: 6890 bytes
+File length: 6685 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
@@ -167,42 +167,42 @@ File Version: 0.12 with ORC_135
Rows: 50000
Compression: ZLIB
Compression size: 4096
-Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(38,0),ts:timestamp>
Stripe Statistics:
Stripe 1:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
File Statistics:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripes:
- Stripe: offset: 3 data: 5897 rows: 50000 tail: 113 index: 497
+ Stripe: offset: 3 data: 5761 rows: 50000 tail: 112 index: 433
Stream: column 0 section ROW_INDEX start: 3 length 17
- Stream: column 1 section ROW_INDEX start: 20 length 83
- Stream: column 2 section ROW_INDEX start: 103 length 81
- Stream: column 3 section ROW_INDEX start: 184 length 111
- Stream: column 4 section ROW_INDEX start: 295 length 110
- Stream: column 5 section ROW_INDEX start: 405 length 95
- Stream: column 1 section DATA start: 500 length 45
- Stream: column 2 section DATA start: 545 length 41
- Stream: column 2 section LENGTH start: 586 length 8
- Stream: column 2 section DICTIONARY_DATA start: 594 length 23
- Stream: column 3 section DATA start: 617 length 5167
- Stream: column 4 section DATA start: 5784 length 524
- Stream: column 4 section SECONDARY start: 6308 length 18
- Stream: column 5 section DATA start: 6326 length 53
- Stream: column 5 section SECONDARY start: 6379 length 18
+ Stream: column 1 section ROW_INDEX start: 20 length 73
+ Stream: column 2 section ROW_INDEX start: 93 length 79
+ Stream: column 3 section ROW_INDEX start: 172 length 85
+ Stream: column 4 section ROW_INDEX start: 257 length 92
+ Stream: column 5 section ROW_INDEX start: 349 length 87
+ Stream: column 1 section DATA start: 436 length 30
+ Stream: column 2 section DATA start: 466 length 24
+ Stream: column 2 section LENGTH start: 490 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 498 length 23
+ Stream: column 3 section DATA start: 521 length 5114
+ Stream: column 4 section DATA start: 5635 length 480
+ Stream: column 4 section SECONDARY start: 6115 length 18
+ Stream: column 5 section DATA start: 6133 length 46
+ Stream: column 5 section SECONDARY start: 6179 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -216,37 +216,37 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
-File length: 6890 bytes
+File length: 6685 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
@@ -279,49 +279,49 @@ File Version: 0.12 with ORC_135
Rows: 100000
Compression: ZLIB
Compression size: 4096
-Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(10,0),ts:timestamp>
+Type: struct<userid:bigint,string1:string,subtype:double,decimal1:decimal(38,0),ts:timestamp>
Stripe Statistics:
Stripe 1:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripe 2:
Column 0: count: 50000 hasNull: false
- Column 1: count: 50000 hasNull: false bytesOnDisk: 45 min: 2 max: 100 sum: 4999238
- Column 2: count: 50000 hasNull: false bytesOnDisk: 72 min: bar max: zebra sum: 249980
- Column 3: count: 50000 hasNull: false bytesOnDisk: 5167 min: 0.8 max: 80.0 sum: 400102.80000000005
- Column 4: count: 50000 hasNull: false bytesOnDisk: 542 min: 0 max: 6 sum: 32
- Column 5: count: 50000 hasNull: false bytesOnDisk: 71 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 50000 hasNull: false bytesOnDisk: 30 min: 2 max: 100 sum: 4999238
+ Column 2: count: 50000 hasNull: false bytesOnDisk: 55 min: bar max: zebra sum: 249980
+ Column 3: count: 50000 hasNull: false bytesOnDisk: 5114 min: 0.8 max: 80.0 sum: 400102.8
+ Column 4: count: 50000 hasNull: false bytesOnDisk: 498 min: 0 max: 6 sum: 32
+ Column 5: count: 50000 hasNull: false bytesOnDisk: 64 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
File Statistics:
Column 0: count: 100000 hasNull: false
- Column 1: count: 100000 hasNull: false bytesOnDisk: 90 min: 2 max: 100 sum: 9998476
- Column 2: count: 100000 hasNull: false bytesOnDisk: 144 min: bar max: zebra sum: 499960
- Column 3: count: 100000 hasNull: false bytesOnDisk: 10334 min: 0.8 max: 80.0 sum: 800205.6000000001
- Column 4: count: 100000 hasNull: false bytesOnDisk: 1084 min: 0 max: 6 sum: 64
- Column 5: count: 100000 hasNull: false bytesOnDisk: 142 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
+ Column 1: count: 100000 hasNull: false bytesOnDisk: 60 min: 2 max: 100 sum: 9998476
+ Column 2: count: 100000 hasNull: false bytesOnDisk: 110 min: bar max: zebra sum: 499960
+ Column 3: count: 100000 hasNull: false bytesOnDisk: 10228 min: 0.8 max: 80.0 sum: 800205.6
+ Column 4: count: 100000 hasNull: false bytesOnDisk: 996 min: 0 max: 6 sum: 64
+ Column 5: count: 100000 hasNull: false bytesOnDisk: 128 min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0
Stripes:
- Stripe: offset: 3 data: 5897 rows: 50000 tail: 113 index: 497
+ Stripe: offset: 3 data: 5761 rows: 50000 tail: 112 index: 433
Stream: column 0 section ROW_INDEX start: 3 length 17
- Stream: column 1 section ROW_INDEX start: 20 length 83
- Stream: column 2 section ROW_INDEX start: 103 length 81
- Stream: column 3 section ROW_INDEX start: 184 length 111
- Stream: column 4 section ROW_INDEX start: 295 length 110
- Stream: column 5 section ROW_INDEX start: 405 length 95
- Stream: column 1 section DATA start: 500 length 45
- Stream: column 2 section DATA start: 545 length 41
- Stream: column 2 section LENGTH start: 586 length 8
- Stream: column 2 section DICTIONARY_DATA start: 594 length 23
- Stream: column 3 section DATA start: 617 length 5167
- Stream: column 4 section DATA start: 5784 length 524
- Stream: column 4 section SECONDARY start: 6308 length 18
- Stream: column 5 section DATA start: 6326 length 53
- Stream: column 5 section SECONDARY start: 6379 length 18
+ Stream: column 1 section ROW_INDEX start: 20 length 73
+ Stream: column 2 section ROW_INDEX start: 93 length 79
+ Stream: column 3 section ROW_INDEX start: 172 length 85
+ Stream: column 4 section ROW_INDEX start: 257 length 92
+ Stream: column 5 section ROW_INDEX start: 349 length 87
+ Stream: column 1 section DATA start: 436 length 30
+ Stream: column 2 section DATA start: 466 length 24
+ Stream: column 2 section LENGTH start: 490 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 498 length 23
+ Stream: column 3 section DATA start: 521 length 5114
+ Stream: column 4 section DATA start: 5635 length 480
+ Stream: column 4 section SECONDARY start: 6115 length 18
+ Stream: column 5 section DATA start: 6133 length 46
+ Stream: column 5 section SECONDARY start: 6179 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -335,51 +335,51 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
- Stripe: offset: 6510 data: 5897 rows: 50000 tail: 113 index: 497
- Stream: column 0 section ROW_INDEX start: 6510 length 17
- Stream: column 1 section ROW_INDEX start: 6527 length 83
- Stream: column 2 section ROW_INDEX start: 6610 length 81
- Stream: column 3 section ROW_INDEX start: 6691 length 111
- Stream: column 4 section ROW_INDEX start: 6802 length 110
- Stream: column 5 section ROW_INDEX start: 6912 length 95
- Stream: column 1 section DATA start: 7007 length 45
- Stream: column 2 section DATA start: 7052 length 41
- Stream: column 2 section LENGTH start: 7093 length 8
- Stream: column 2 section DICTIONARY_DATA start: 7101 length 23
- Stream: column 3 section DATA start: 7124 length 5167
- Stream: column 4 section DATA start: 12291 length 524
- Stream: column 4 section SECONDARY start: 12815 length 18
- Stream: column 5 section DATA start: 12833 length 53
- Stream: column 5 section SECONDARY start: 12886 length 18
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
+ Stripe: offset: 6309 data: 5761 rows: 50000 tail: 112 index: 433
+ Stream: column 0 section ROW_INDEX start: 6309 length 17
+ Stream: column 1 section ROW_INDEX start: 6326 length 73
+ Stream: column 2 section ROW_INDEX start: 6399 length 79
+ Stream: column 3 section ROW_INDEX start: 6478 length 85
+ Stream: column 4 section ROW_INDEX start: 6563 length 92
+ Stream: column 5 section ROW_INDEX start: 6655 length 87
+ Stream: column 1 section DATA start: 6742 length 30
+ Stream: column 2 section DATA start: 6772 length 24
+ Stream: column 2 section LENGTH start: 6796 length 8
+ Stream: column 2 section DICTIONARY_DATA start: 6804 length 23
+ Stream: column 3 section DATA start: 6827 length 5114
+ Stream: column 4 section DATA start: 11941 length 480
+ Stream: column 4 section SECONDARY start: 12421 length 18
+ Stream: column 5 section DATA start: 12439 length 46
+ Stream: column 5 section SECONDARY start: 12485 length 18
Encoding column 0: DIRECT
Encoding column 1: DIRECT_V2
Encoding column 2: DICTIONARY_V2[6]
@@ -393,37 +393,37 @@ Stripes:
Entry 3: count: 10000 hasNull: false positions:
Entry 4: count: 10000 hasNull: false positions:
Row group indices for column 1:
- Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
- Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
- Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
- Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+ Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999238 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,107,262
+ Entry 2: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,207,22
+ Entry 3: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,302,294
+ Entry 4: count: 10000 hasNull: false min: 100 max: 100 sum: 1000000 positions: 0,402,54
Row group indices for column 2:
- Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
- Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
- Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
- Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
- Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+ Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49980 positions: 0,0,0
+ Entry 1: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,83,262
+ Entry 2: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,163,22
+ Entry 3: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,239,294
+ Entry 4: count: 10000 hasNull: false min: zebra max: zebra sum: 50000 positions: 0,319,54
Row group indices for column 3:
- Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
- Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
- Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
- Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
- Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+ Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80102.8 positions: 0,0
+ Entry 1: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 1017,2176
+ Entry 2: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 2057,256
+ Entry 3: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 3045,2432
+ Entry 4: count: 10000 hasNull: false min: 8.0 max: 8.0 sum: 80000.0 positions: 4085,512
Row group indices for column 4:
- Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
- Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
- Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
- Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+ Entry 0: count: 10000 hasNull: false min: 0 max: 6 sum: 32 positions: 0,0,0,0,0
+ Entry 1: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 84,1808,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 160,3616,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 274,1328,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 0 max: 0 sum: 0 positions: 350,3136,0,312,64
Row group indices for column 5:
Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,0,0,0,0,0
- Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,164,391,0,76,272
- Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:00.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,336,391,0,156,32
- Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:05.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,508,391,0,232,304
- Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:00:15.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,680,391,0,312,64
+ Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,194,262,0,76,272
+ Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,354,22,0,156,32
+ Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,506,294,0,232,304
+ Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:04:10.0 max: 1969-12-31 16:04:10.0 min UTC: 1969-12-31 08:04:10.0 max UTC: 1969-12-31 08:04:10.0 positions: 0,666,54,0,312,64
-File length: 13411 bytes
+File length: 13004 bytes
Padding length: 0 bytes
Padding ratio: 0%
________________________________________________________________________________________________________________________
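(For context only, not part of the patch: the stripe statistics, stream sections and row group indexes shown in the golden output above are what Hive's ORC file dump utility prints. A minimal sketch of how such a dump could be reproduced, assuming a hypothetical table name and data file path; the column ids passed to --rowindex are the ones indexed above.)

  # create a table matching the dumped schema, with decimal widened to (38,0); name is hypothetical
  hive -e "create table orc_dump_example (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc"
  # after loading data, print file/stripe statistics plus row group indexes for columns 1-5
  hive --orcfiledump --rowindex 1,2,3,4,5 <path-to-orc-data-file>
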
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge5.q.out b/ql/src/test/results/clientpositive/orc_merge5.q.out
index 0e87ce6..768132c 100644
--- a/ql/src/test/results/clientpositive/orc_merge5.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge5.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n5
-POSTHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n5
-PREHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5b_n0
@@ -43,7 +43,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -55,7 +55,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5b_n0
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp)
outputColumnNames: userid, string1, subtype, decimal1, ts
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -66,7 +66,7 @@ STAGE PLANS:
Reduce Output Operator
sort order:
Statistics: Num rows: 1 Data size: 2344 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Reduce Operator Tree:
Group By Operator
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4)
@@ -96,7 +96,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5b_n0
PREHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13
@@ -107,7 +107,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -159,7 +159,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -171,7 +171,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5b_n0
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp)
outputColumnNames: userid, string1, subtype, decimal1, ts
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -182,7 +182,7 @@ STAGE PLANS:
Reduce Output Operator
sort order:
Statistics: Num rows: 1 Data size: 2344 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Reduce Operator Tree:
Group By Operator
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4)
@@ -221,7 +221,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5b_n0
Stage: Stage-3
@@ -252,7 +252,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -286,7 +286,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b_n0 select userid,string1,sub
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n5
POSTHOOK: Output: default@orc_merge5b_n0
-POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b_n0.decimal1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.string1 SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.subtype SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b_n0.ts SIMPLE [(orc_merge5_n5)orc_merge5_n5.FieldSchema(name:ts, type:timestamp, comment:null), ]
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge6.q.out b/ql/src/test/results/clientpositive/orc_merge6.q.out
index 39813b7..7c429d6 100644
--- a/ql/src/test/results/clientpositive/orc_merge6.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge6.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n4
-POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n4
-PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+PREHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year string, hour int) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5a_n1
-POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+POSTHOOK: query: create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) partitioned by (year string, hour int) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5a_n1
@@ -43,7 +43,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -55,7 +55,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a_n1
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
outputColumnNames: userid, string1, subtype, decimal1, ts, year, hour
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -69,7 +69,7 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Reduce Operator Tree:
Group By Operator
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4)
@@ -78,7 +78,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
+ expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -107,7 +107,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a_n1
PREHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13
@@ -118,7 +118,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -131,7 +131,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -212,7 +212,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -224,7 +224,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5a_n1
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp), '2000' (type: string), UDFToInteger('24') (type: int)
outputColumnNames: userid, string1, subtype, decimal1, ts, year, hour
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -238,7 +238,7 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Reduce Operator Tree:
Group By Operator
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4)
@@ -247,7 +247,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
+ expressions: _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col5 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col6 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -285,7 +285,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5a_n1
Stage: Stage-3
@@ -316,7 +316,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -329,7 +329,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -392,7 +392,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2000",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2000/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -405,7 +405,7 @@ POSTHOOK: query: insert overwrite table orc_merge5a_n1 partition (year="2001",ho
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n4
POSTHOOK: Output: default@orc_merge5a_n1@year=2001/hour=24
-POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5a_n1 PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5_n4)orc_merge5_n4.FieldSchema(name:ts, type:timestamp, comment:null), ]
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
index 5a1b00b..6295714 100644
--- a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out
@@ -1,16 +1,16 @@
-PREHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5_n3
-POSTHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5_n3
-PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_merge5b
-POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal(38,0), ts timestamp) stored as orc
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_merge5b
@@ -42,7 +42,7 @@ STAGE PLANS:
predicate: (userid <= 13L) (type: boolean)
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(38,0)), ts (type: timestamp)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
File Output Operator
@@ -54,7 +54,7 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.orc_merge5b
Select Operator
- expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(10,0)), _col4 (type: timestamp)
+ expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: double), _col3 (type: decimal(38,0)), _col4 (type: timestamp)
outputColumnNames: userid, string1, subtype, decimal1, ts
Statistics: Num rows: 1 Data size: 2464020 Basic stats: COMPLETE Column stats: NONE
Group By Operator
@@ -65,7 +65,7 @@ STAGE PLANS:
Reduce Output Operator
sort order:
Statistics: Num rows: 1 Data size: 2344 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(10,0),max:decimal(10,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+ value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:decimal(38,0),max:decimal(38,0),countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
Reduce Operator Tree:
Group By Operator
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4)
@@ -95,7 +95,7 @@ STAGE PLANS:
Basic Stats Work:
Column Stats Desc:
Columns: userid, string1, subtype, decimal1, ts
- Column Types: bigint, string, double, decimal(10,0), timestamp
+ Column Types: bigint, string, double, decimal(38,0), timestamp
Table: default.orc_merge5b
PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13
@@ -106,7 +106,7 @@ POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtyp
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -119,7 +119,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -132,7 +132,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -145,7 +145,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -158,7 +158,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
@@ -171,7 +171,7 @@ POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_merge5_n3
POSTHOOK: Output: default@orc_merge5b
-POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:decimal1, type:decimal(38,0), comment:null), ]
POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:string1, type:string, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:subtype, type:double, comment:null), ]
POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5_n3)orc_merge5_n3.FieldSchema(name:ts, type:timestamp, comment:null), ]
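In the hunks above, the reported column type moves from decimal(10,0) to decimal(38,0) because the test DDL now declares decimal(38,0) explicitly instead of the bare decimal type, which Hive resolves to decimal(10,0). As a minimal sketch, assuming the HiveDecimal constants from the storage-api module are on the classpath, the following standalone snippet prints the defaults and limits behind those two types:

import org.apache.hadoop.hive.common.type.HiveDecimal;

// Prints the precision/scale defaults that explain why an unqualified "decimal"
// column appears as decimal(10,0) in the old golden files, while the updated
// tests pin the column to decimal(38,0), the maximum supported precision.
public class DecimalDefaults {
  public static void main(String[] args) {
    System.out.println("user default : decimal("
        + HiveDecimal.USER_DEFAULT_PRECISION + "," + HiveDecimal.USER_DEFAULT_SCALE + ")");
    System.out.println("max precision: " + HiveDecimal.MAX_PRECISION);
    System.out.println("max scale    : " + HiveDecimal.MAX_SCALE);
  }
}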
[52/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index da41e6e..626e103 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -791,6 +791,50 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String dbName, String tableName, List<String> partNames, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName,
+ partNames, colNames, txnId, validWriteIdList);
+ }
+
+ @Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName, List<String> partNames,
+ List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames,
+ partNames);
+ rqst.setCatName(catName);
+ rqst.setTxnId(txnId);
+ rqst.setValidWriteIdList(validWriteIdList);
+ return client.get_partitions_statistics_req(rqst).getPartStats();
+ }
+
+ @Override
+ public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
+ List<String> partNames, long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames,
+ partNames, txnId, writeIdList);
+ }
+
+ @Override
+ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List<String> colNames,
+ List<String> partNames, long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ if (colNames.isEmpty() || partNames.isEmpty()) {
+ LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
+ return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
+ }
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
+ req.setCatName(catName);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(writeIdList);
+ return client.get_aggr_stats_for(req);
+ }
+
+ @Override
public List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
String sourceDb, String sourceTable, String destCat,
String destDb, String destTableName) throws TException {
@@ -1584,6 +1628,14 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public Table getTable(String dbname, String name,
+ long txnId, String validWriteIdList)
+ throws MetaException, TException, NoSuchObjectException {
+ return getTable(getDefaultCatalog(conf), dbname, name,
+ txnId, validWriteIdList);
+ }
+
+ @Override
public Table getTable(String catName, String dbName, String tableName) throws TException {
GetTableRequest req = new GetTableRequest(dbName, tableName);
req.setCatName(catName);
@@ -1593,6 +1645,18 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String validWriteIdList) throws TException {
+ GetTableRequest req = new GetTableRequest(dbName, tableName);
+ req.setCatName(catName);
+ req.setCapabilities(version);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(validWriteIdList);
+ Table t = client.get_table_req(req).getTable();
+ return deepCopy(filterHook.filterTable(t));
+ }
+
+ @Override
public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
throws TException {
return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames);
@@ -1821,21 +1885,42 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
throws TException {
- alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null);
+ alter_partitions(
+ getDefaultCatalog(conf), dbName, tblName, newParts, null, -1, null);
}
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
EnvironmentContext environmentContext) throws TException {
- alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext);
+ alter_partitions(
+ getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null);
+ }
+
+ @Override
+ public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
+ throws InvalidOperationException, MetaException, TException {
+ //client.alter_partition_with_environment_context(getDefaultCatalog(conf),
+ // dbName, tblName, newParts, environmentContext);
+ alter_partitions(getDefaultCatalog(conf),
+ dbName, tblName, newParts, environmentContext, txnId, writeIdList);
+
}
@Override
public void alter_partitions(String catName, String dbName, String tblName,
List<Partition> newParts,
- EnvironmentContext environmentContext) throws TException {
- client.alter_partitions_with_environment_context(prependCatalogToDbName(catName, dbName, conf),
- tblName, newParts, environmentContext);
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList) throws TException {
+ AlterPartitionsRequest req = new AlterPartitionsRequest();
+ req.setDbName(prependCatalogToDbName(catName, dbName, conf));
+ req.setTableName(tblName);
+ req.setPartitions(newParts);
+ req.setEnvironmentContext(environmentContext);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(writeIdList);
+ client.alter_partitions_with_environment_context(req);
}
@Override
@@ -1967,6 +2052,28 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
}
@Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList) throws TException {
+ return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames,
+ txnId, validWriteIdList);
+ }
+
+ @Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
+ String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList) throws TException {
+ TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames);
+ rqst.setCatName(catName);
+ rqst.setTxnId(txnId);
+ rqst.setValidWriteIdList(validWriteIdList);
+ return client.get_table_statistics_req(rqst).getTableStats();
+ }
+
+ @Override
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
String dbName, String tableName, List<String> partNames, List<String> colNames)
throws TException {
@@ -3319,4 +3426,5 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
req.setMaxCreateTime(maxCreateTime);
return client.get_runtime_stats(req);
}
+
}
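The client-side additions above all follow one pattern: build the existing Thrift request object, set txnId and the ValidWriteIdList string on it, and send it. A hedged caller-side sketch, where the database, table, column names, transaction id, and writeId list are made up for illustration and the client is assumed to be configured with valid metastore URIs:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Table;

public class SnapshotAwareReads {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();            // metastore URIs assumed to be configured
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);

    long txnId = 42L;                                               // hypothetical open transaction
    String writeIds = "default.acid_tbl:5:9223372036854775807::";  // hypothetical ValidWriteIdList

    // Table fetched under the query's snapshot; ObjectStore sets the isStatsCompliant
    // flag on the returned object so the caller knows whether the stored stats are usable.
    Table t = client.getTable("default", "acid_tbl", txnId, writeIds);

    // Column statistics validated against the same snapshot.
    List<ColumnStatisticsObj> stats = client.getTableColumnStatistics(
        "default", "acid_tbl", Arrays.asList("userid", "decimal1"), txnId, writeIds);

    System.out.println(t.getTableName() + " returned " + stats.size() + " column stat objects");
    client.close();
  }
}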
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 29c98d1..3a65f77 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -90,6 +90,11 @@ public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {
Table get_table_core(final String catName, final String dbname, final String name)
throws MetaException, NoSuchObjectException;
+ Table get_table_core(final String catName, final String dbname,
+ final String name, final long txnId,
+ final String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
/**
* Get a list of all transactional listeners.
* @return list of listeners.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index bc09076..c4cd8b4 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -712,6 +712,10 @@ public interface IMetaStoreClient {
Table getTable(String dbName, String tableName) throws MetaException,
TException, NoSuchObjectException;
+ Table getTable(String dbName, String tableName,
+ long txnId, String validWriteIdList)
+ throws MetaException, TException, NoSuchObjectException;
+
/**
* Get a table object.
* @param catName catalog the table is in.
@@ -723,6 +727,8 @@ public interface IMetaStoreClient {
*/
Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;
+ Table getTable(String catName, String dbName, String tableName,
+ long txnId, String validWriteIdList) throws TException;
/**
* Get tables as objects (rather than just fetching their names). This is more expensive and
* should only be used if you actually need all the information about the tables.
@@ -2125,6 +2131,11 @@ public interface IMetaStoreClient {
EnvironmentContext environmentContext)
throws InvalidOperationException, MetaException, TException;
+ void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
+ throws InvalidOperationException, MetaException, TException;
+
/**
* updates a list of partitions
* @param catName catalog name.
@@ -2144,7 +2155,7 @@ public interface IMetaStoreClient {
default void alter_partitions(String catName, String dbName, String tblName,
List<Partition> newParts)
throws InvalidOperationException, MetaException, TException {
- alter_partitions(catName, dbName, tblName, newParts, null);
+ alter_partitions(catName, dbName, tblName, newParts, null, -1, null);
}
/**
@@ -2165,7 +2176,8 @@ public interface IMetaStoreClient {
* if error in communicating with metastore server
*/
void alter_partitions(String catName, String dbName, String tblName, List<Partition> newParts,
- EnvironmentContext environmentContext)
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
throws InvalidOperationException, MetaException, TException;
/**
@@ -2346,6 +2358,12 @@ public interface IMetaStoreClient {
List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
List<String> colNames) throws NoSuchObjectException, MetaException, TException;
+ List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
+
/**
* Get the column statistics for a set of columns in a table. This should only be used for
* non-partitioned tables. For partitioned tables use
@@ -2363,6 +2381,11 @@ public interface IMetaStoreClient {
List<String> colNames)
throws NoSuchObjectException, MetaException, TException;
+ List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName, String tableName,
+ List<String> colNames,
+ long txnId,
+ String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
/**
* Get the column statistics for a set of columns in a partition.
* @param dbName database name
@@ -2379,6 +2402,11 @@ public interface IMetaStoreClient {
String tableName, List<String> partNames, List<String> colNames)
throws NoSuchObjectException, MetaException, TException;
+ Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
+ String tableName, List<String> partNames, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
+
/**
* Get the column statistics for a set of columns in a partition.
* @param catName catalog name
@@ -2396,6 +2424,11 @@ public interface IMetaStoreClient {
String catName, String dbName, String tableName, List<String> partNames, List<String> colNames)
throws NoSuchObjectException, MetaException, TException;
+ Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException;
/**
* Delete partition level column statistics given dbName, tableName, partName and colName, or
* all columns in a partition.
@@ -3237,6 +3270,10 @@ public interface IMetaStoreClient {
AggrStats getAggrColStatsFor(String dbName, String tblName,
List<String> colNames, List<String> partName) throws NoSuchObjectException, MetaException, TException;
+ AggrStats getAggrColStatsFor(String dbName, String tblName,
+ List<String> colNames, List<String> partName,
+ long txnId, String writeIdList) throws NoSuchObjectException, MetaException, TException;
+
/**
* Get aggregated column stats for a set of partitions.
* @param catName catalog name
@@ -3253,6 +3290,10 @@ public interface IMetaStoreClient {
List<String> colNames, List<String> partNames)
throws NoSuchObjectException, MetaException, TException;
+ AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
+ List<String> colNames, List<String> partNames,
+ long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException;
/**
* Set table or partition column statistics.
* @param request request object, contains all the table, partition, and statistics information
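On the interface side, note that the pre-existing alter_partitions entry points now forward to the extended signature with (-1, null), meaning no snapshot information. A hedged sketch of a writer using the new overload, with the table name, transaction id, and writeId list purely illustrative:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class SnapshotAwareAlter {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new Configuration());

    long txnId = 42L;                                               // hypothetical writer transaction
    String writeIds = "default.acid_tbl:5:9223372036854775807::";  // hypothetical ValidWriteIdList

    // Fetch the current partitions, tweak them, and write them back under the writer's snapshot.
    List<Partition> parts = client.listPartitions("default", "acid_tbl", (short) -1);
    for (Partition p : parts) {
      p.getParameters().put("last_altered_by", "stats_updater");   // illustrative change only
    }
    // With the new overload the metastore can decide whether the stored stats remain
    // COLUMN_STATS_ACCURATE under this snapshot (see the ObjectStore changes below).
    client.alter_partitions("default", "acid_tbl", parts, null, txnId, writeIds);

    // Legacy call sites keep compiling; they forward with (-1, null).
    client.alter_partitions("default", "acid_tbl", parts);

    client.close();
  }
}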
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index e99f888..28426b2 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -80,140 +80,29 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.DatabaseName;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.common.*;
import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
-import org.apache.hadoop.hive.metastore.api.SchemaType;
-import org.apache.hadoop.hive.metastore.api.SchemaValidation;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SerdeType;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
import org.apache.hadoop.hive.metastore.metrics.Metrics;
import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
-import org.apache.hadoop.hive.metastore.model.MCatalog;
-import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
-import org.apache.hadoop.hive.metastore.model.MConstraint;
-import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
-import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
-import org.apache.hadoop.hive.metastore.model.MDatabase;
-import org.apache.hadoop.hive.metastore.model.MDelegationToken;
-import org.apache.hadoop.hive.metastore.model.MFieldSchema;
-import org.apache.hadoop.hive.metastore.model.MFunction;
-import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
-import org.apache.hadoop.hive.metastore.model.MISchema;
-import org.apache.hadoop.hive.metastore.model.MMasterKey;
-import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
-import org.apache.hadoop.hive.metastore.model.MNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.metastore.model.MOrder;
-import org.apache.hadoop.hive.metastore.model.MPartition;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
-import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
-import org.apache.hadoop.hive.metastore.model.MResourceUri;
-import org.apache.hadoop.hive.metastore.model.MRole;
-import org.apache.hadoop.hive.metastore.model.MRoleMap;
-import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
-import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
-import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
-import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
-import org.apache.hadoop.hive.metastore.model.MStringList;
-import org.apache.hadoop.hive.metastore.model.MTable;
-import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
-import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
-import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
-import org.apache.hadoop.hive.metastore.model.MType;
-import org.apache.hadoop.hive.metastore.model.MVersionTable;
-import org.apache.hadoop.hive.metastore.model.MWMMapping;
+import org.apache.hadoop.hive.metastore.model.*;
import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
-import org.apache.hadoop.hive.metastore.model.MWMPool;
-import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
-import org.apache.hadoop.hive.metastore.model.MWMTrigger;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.JavaUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+import org.apache.hive.common.util.TxnIdUtils;
import org.apache.thrift.TException;
import org.datanucleus.AbstractNucleusContext;
import org.datanucleus.ClassLoaderResolver;
@@ -1303,10 +1192,16 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public void createTable(Table tbl) throws InvalidObjectException, MetaException {
boolean commited = false;
+ MTable mtbl = null;
+
try {
openTransaction();
- MTable mtbl = convertToMTable(tbl);
+ mtbl = convertToMTable(tbl);
+ if (TxnUtils.isTransactionalTable(tbl)) {
+ mtbl.setTxnId(tbl.getTxnId());
+ mtbl.setWriteIdList(tbl.getValidWriteIdList());
+ }
pm.makePersistent(mtbl);
if (tbl.getCreationMetadata() != null) {
@@ -1417,6 +1312,8 @@ public class ObjectStore implements RawStore, Configurable {
TableName.getQualified(catName, dbName, tableName));
}
+ Table table = convertToTable(tbl);
+
List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
catName, dbName, tableName, null);
if (CollectionUtils.isNotEmpty(tabConstraints)) {
@@ -1515,17 +1412,51 @@ public class ObjectStore implements RawStore, Configurable {
return mConstraints;
}
+ private static String getFullyQualifiedTableName(String dbName, String tblName) {
+ return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".")
+ + "\"" + tblName + "\"";
+ }
+
+ @Override
+ public Table getTable(String catName, String dbName, String tableName)
+ throws MetaException {
+ return getTable(catName, dbName, tableName, -1, null);
+ }
+
@Override
- public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String writeIdList)
+ throws MetaException {
boolean commited = false;
Table tbl = null;
try {
openTransaction();
- tbl = convertToTable(getMTable(catName, dbName, tableName));
+ MTable mtable = getMTable(catName, dbName, tableName);
+ tbl = convertToTable(mtable);
// Retrieve creation metadata if needed
if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
tbl.setCreationMetadata(
- convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
+ convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
+ }
+
+ // For a transactional non-partitioned table, check whether the current
+ // version of the table statistics in the metastore complies with the
+ // client query's snapshot isolation.
+ // Note: for a partitioned table, the table stats and snapshot live in MPartition.
+ if (writeIdList != null) {
+ if (tbl != null
+ && TxnUtils.isTransactionalTable(tbl)
+ && tbl.getPartitionKeysSize() == 0) {
+ if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList, -1, false)) {
+ tbl.setIsStatsCompliant(IsolationLevelCompliance.YES);
+ } else {
+ tbl.setIsStatsCompliant(IsolationLevelCompliance.NO);
+ // Do not persist the following state since it is query-specific (not global).
+ StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
+ LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
+ }
+ }
}
commited = commitTransaction();
} finally {
@@ -2049,12 +1980,17 @@ public class ObjectStore implements RawStore, Configurable {
String ownerType = (ownerPrincipalType == null) ? PrincipalType.USER.name() : ownerPrincipalType.name();
// A new table is always created with a new column descriptor
- return new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
+ MTable mtable = new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl
.getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
tableType);
+ if (TxnUtils.isTransactionalTable(tbl)) {
+ mtable.setTxnId(tbl.getTxnId());
+ mtable.setWriteIdList(tbl.getValidWriteIdList());
+ }
+ return mtable;
}
private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
@@ -2331,6 +2267,7 @@ public class ObjectStore implements RawStore, Configurable {
+ dbName + "." + tblName + ": " + part);
}
MPartition mpart = convertToMPart(part, table, true);
+
toPersist.add(mpart);
int now = (int)(System.currentTimeMillis()/1000);
if (tabGrants != null) {
@@ -2442,6 +2379,7 @@ public class ObjectStore implements RawStore, Configurable {
MetaException {
boolean success = false;
boolean commited = false;
+
try {
String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf);
MTable table = this.getMTable(catName, part.getDbName(), part.getTableName());
@@ -2453,7 +2391,7 @@ public class ObjectStore implements RawStore, Configurable {
catName, part.getDbName(), part.getTableName());
}
openTransaction();
- MPartition mpart = convertToMPart(part, true);
+ MPartition mpart = convertToMPart(part, table, true);
pm.makePersistent(mpart);
int now = (int)(System.currentTimeMillis()/1000);
@@ -2495,14 +2433,38 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public Partition getPartition(String catName, String dbName, String tableName,
List<String> part_vals) throws NoSuchObjectException, MetaException {
+ return getPartition(catName, dbName, tableName, part_vals, -1, null);
+ }
+
+ @Override
+ public Partition getPartition(String catName, String dbName, String tableName,
+ List<String> part_vals,
+ long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException {
openTransaction();
- Partition part = convertToPart(getMPartition(catName, dbName, tableName, part_vals));
+ MTable table = this.getMTable(catName, dbName, tableName);
+ MPartition mpart = getMPartition(catName, dbName, tableName, part_vals);
+ Partition part = convertToPart(mpart);
commitTransaction();
if(part == null) {
throw new NoSuchObjectException("partition values="
+ part_vals.toString());
}
part.setValues(part_vals);
+ // For a partition of a transactional table, check whether the current version of the
+ // partition statistics in the metastore complies with the client query's snapshot isolation.
+ if (writeIdList != null) {
+ if (TxnUtils.isTransactionalTable(table.getParameters())) {
+ if (isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, -1, false)) {
+ part.setIsStatsCompliant(IsolationLevelCompliance.YES);
+ } else {
+ part.setIsStatsCompliant(IsolationLevelCompliance.NO);
+ // Do not persist the following state since it is query-specific (not global).
+ StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
+ LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters.");
+ }
+ }
+ }
return part;
}
@@ -2601,26 +2563,6 @@ public class ObjectStore implements RawStore, Configurable {
* is true, then this partition's storage descriptor's column descriptor will point
* to the same one as the table's storage descriptor.
* @param part the partition to convert
- * @param useTableCD whether to try to use the parent table's column descriptor.
- * @return the model partition object, and null if the input partition is null.
- * @throws InvalidObjectException
- * @throws MetaException
- */
- private MPartition convertToMPart(Partition part, boolean useTableCD)
- throws InvalidObjectException, MetaException {
- if (part == null) {
- return null;
- }
- MTable mt = getMTable(part.getCatName(), part.getDbName(), part.getTableName());
- return convertToMPart(part, mt, useTableCD);
- }
-
- /**
- * Convert a Partition object into an MPartition, which is an object backed by the db
- * If the Partition's set of columns is the same as the parent table's AND useTableCD
- * is true, then this partition's storage descriptor's column descriptor will point
- * to the same one as the table's storage descriptor.
- * @param part the partition to convert
* @param mt the parent table object
* @param useTableCD whether to try to use the parent table's column descriptor.
* @return the model partition object, and null if the input partition is null.
@@ -2652,10 +2594,15 @@ public class ObjectStore implements RawStore, Configurable {
msd = convertToMStorageDescriptor(part.getSd());
}
- return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt
+ MPartition mpart = new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt
.getPartitionKeys()), part.getValues()), mt, part.getValues(), part
.getCreateTime(), part.getLastAccessTime(),
msd, part.getParameters());
+ if (TxnUtils.isTransactionalTable(mt.getParameters())) {
+ mpart.setTxnId(part.getTxnId());
+ mpart.setWriteIdList(part.getValidWriteIdList());
+ }
+ return mpart;
}
private Partition convertToPart(MPartition mpart) throws MetaException {
@@ -3031,7 +2978,7 @@ public class ObjectStore implements RawStore, Configurable {
TableName.getQualified(catName, dbName, tableName), filter, cols);
List<String> partitionNames = null;
List<Partition> partitions = null;
- Table tbl = getTable(catName, dbName, tableName);
+ Table tbl = getTable(catName, dbName, tableName, -1, null);
try {
// Get partitions by name - ascending or descending
partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending,
@@ -3164,7 +3111,8 @@ public class ObjectStore implements RawStore, Configurable {
if (applyDistinct) {
partValuesSelect.append("DISTINCT ");
}
- List<FieldSchema> partitionKeys = getTable(catName, dbName, tableName).getPartitionKeys();
+ List<FieldSchema> partitionKeys =
+ getTable(catName, dbName, tableName, -1, null).getPartitionKeys();
for (FieldSchema key : cols) {
partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", ");
}
@@ -3246,7 +3194,7 @@ public class ObjectStore implements RawStore, Configurable {
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
- Table table = getTable(catName, dbName, tableName);
+ Table table = getTable(catName, dbName, tableName, -1, null);
if (table == null) {
throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tableName)
+ " table not found");
@@ -3622,7 +3570,8 @@ public class ObjectStore implements RawStore, Configurable {
protected T results = null;
public GetHelper(String catalogName, String dbName, String tblName,
- boolean allowSql, boolean allowJdo) throws MetaException {
+ boolean allowSql, boolean allowJdo)
+ throws MetaException {
assert allowSql || allowJdo;
this.allowJdo = allowJdo;
this.catName = (catalogName != null) ? normalizeIdentifier(catalogName) : null;
@@ -3840,7 +3789,7 @@ public class ObjectStore implements RawStore, Configurable {
private abstract class GetStatHelper extends GetHelper<ColumnStatistics> {
public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql,
- boolean allowJdo) throws MetaException {
+ boolean allowJdo, String writeIdList) throws MetaException {
super(catalogName, dbName, tblName, allowSql, allowJdo);
}
@@ -4140,6 +4089,21 @@ public class ObjectStore implements RawStore, Configurable {
oldt.setViewExpandedText(newt.getViewExpandedText());
oldt.setRewriteEnabled(newt.isRewriteEnabled());
+ // If transactional, update MTable to have txnId and the writeIdList
+ // for the current Stats updater query.
+ if (newTable.getValidWriteIdList() != null &&
+ TxnUtils.isTransactionalTable(newTable)) {
+ // Check for the concurrent-INSERT case and, if detected, set the flag to false.
+ if (isCurrentStatsValidForTheQuery(oldt, newt.getTxnId(), newt.getWriteIdList(),
+ -1, true)) {
+ StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
+ LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " +
+ dbname + "." + name + ". will be made persistent.");
+ }
+ oldt.setTxnId(newTable.getTxnId());
+ oldt.setWriteIdList(newTable.getValidWriteIdList());
+ }
+
// commit the changes
success = commitTransaction();
} finally {
@@ -4192,8 +4156,9 @@ public class ObjectStore implements RawStore, Configurable {
catName = normalizeIdentifier(catName);
name = normalizeIdentifier(name);
dbname = normalizeIdentifier(dbname);
+ MTable table = this.getMTable(catName, dbname, name);
MPartition oldp = getMPartition(catName, dbname, name, part_vals);
- MPartition newp = convertToMPart(newPart, false);
+ MPartition newp = convertToMPart(newPart, table, false);
MColumnDescriptor oldCD = null;
MStorageDescriptor oldSD = oldp.getSd();
if (oldSD != null) {
@@ -4214,6 +4179,20 @@ public class ObjectStore implements RawStore, Configurable {
if (newp.getLastAccessTime() != oldp.getLastAccessTime()) {
oldp.setLastAccessTime(newp.getLastAccessTime());
}
+ // If transactional, update the MPartition with the txnId and writeIdList
+ // of the current updater query.
+ if (newPart.getValidWriteIdList() != null &&
+ TxnUtils.isTransactionalTable(table.getParameters())) {
+ // Check for the concurrent-INSERT case and, if detected, set the flag to false.
+ if (!isCurrentStatsValidForTheQuery(oldp, newp.getTxnId(), newp.getWriteIdList(),
+ -1, true)) {
+ StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
+ LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " +
+ dbname + "." + name + "." + oldp.getPartitionName() + " will be made persistent.");
+ }
+ oldp.setTxnId(newPart.getTxnId());
+ oldp.setWriteIdList(newPart.getValidWriteIdList());
+ }
return oldCD;
}
@@ -4245,7 +4224,8 @@ public class ObjectStore implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbname, String name,
- List<List<String>> part_vals, List<Partition> newParts)
+ List<List<String>> part_vals, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
boolean success = false;
Exception e = null;
@@ -4255,6 +4235,10 @@ public class ObjectStore implements RawStore, Configurable {
Set<MColumnDescriptor> oldCds = new HashSet<>();
for (Partition tmpPart: newParts) {
List<String> tmpPartVals = part_val_itr.next();
+ if (txnId > 0) {
+ tmpPart.setTxnId(txnId);
+ tmpPart.setValidWriteIdList(writeIdList);
+ }
MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, tmpPartVals, tmpPart);
if (oldCd != null) {
oldCds.add(oldCd);
@@ -6131,7 +6115,9 @@ public class ObjectStore implements RawStore, Configurable {
} else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
boolean found = false;
- Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject.getObjectName());
+ Table tabObj =
+ this.getTable(catName, hiveObject.getDbName(),
+ hiveObject.getObjectName(), -1, null);
String partName = null;
if (hiveObject.getPartValues() != null) {
partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues());
@@ -6165,7 +6151,7 @@ public class ObjectStore implements RawStore, Configurable {
} else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject
- .getObjectName());
+ .getObjectName(), -1, null);
String partName = null;
if (hiveObject.getPartValues() != null) {
partName = Warehouse.makePartName(tabObj.getPartitionKeys(),
@@ -7687,7 +7673,7 @@ public class ObjectStore implements RawStore, Configurable {
query
.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," +
"java.lang.String t5");
- Table tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid.
+ Table tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
if (null == tbl) {
throw new UnknownTableException("Table: " + tblName + " is not found.");
}
@@ -7713,7 +7699,7 @@ public class ObjectStore implements RawStore, Configurable {
Table tbl = null;
try{
openTransaction();
- tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid.
+ tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
if(null == tbl) {
throw new UnknownTableException("Table: "+ tblName + " is not found.");
}
@@ -8442,7 +8428,10 @@ public class ObjectStore implements RawStore, Configurable {
}
}
- private List<MTableColumnStatistics> getMTableColumnStatistics(Table table, List<String> colNames, QueryWrapper queryWrapper)
+ private List<MTableColumnStatistics> getMTableColumnStatistics(
+ Table table,
+ List<String> colNames,
+ QueryWrapper queryWrapper)
throws MetaException {
if (colNames == null || colNames.isEmpty()) {
return Collections.emptyList();
@@ -8517,9 +8506,40 @@ public class ObjectStore implements RawStore, Configurable {
}
@Override
- public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+ public ColumnStatistics getTableColumnStatistics(
+ String catName,
+ String dbName,
+ String tableName,
List<String> colNames) throws MetaException, NoSuchObjectException {
- return getTableColumnStatisticsInternal(catName, dbName, tableName, colNames, true, true);
+ return getTableColumnStatisticsInternal(
+ catName, dbName, tableName, colNames, true, true);
+ }
+
+ @Override
+ public ColumnStatistics getTableColumnStatistics(
+ String catName,
+ String dbName,
+ String tableName,
+ List<String> colNames,
+ long txnId,
+ String writeIdList) throws MetaException, NoSuchObjectException {
+ IsolationLevelCompliance iLL = IsolationLevelCompliance.UNKNOWN;
+ // If the current stats in the metastore do not comply with
+ // the isolation level of the query, set the compliance flag to NO.
+ if (writeIdList != null) {
+ MTable table = this.getMTable(catName, dbName, tableName);
+ if (!isCurrentStatsValidForTheQuery(table, txnId, writeIdList, -1, false)) {
+ iLL = IsolationLevelCompliance.NO;
+ } else {
+ iLL = IsolationLevelCompliance.YES;
+ }
+ }
+ ColumnStatistics cS = getTableColumnStatisticsInternal(
+ catName, dbName, tableName, colNames, true, true);
+ if (cS != null) {
+ cS.setIsStatsCompliant(iLL);
+ }
+ return cS;
}
protected ColumnStatistics getTableColumnStatisticsInternal(
@@ -8527,7 +8547,7 @@ public class ObjectStore implements RawStore, Configurable {
boolean allowJdo) throws MetaException, NoSuchObjectException {
final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR);
return new GetStatHelper(normalizeIdentifier(catName), normalizeIdentifier(dbName),
- normalizeIdentifier(tableName), allowSql, allowJdo) {
+ normalizeIdentifier(tableName), allowSql, allowJdo, null) {
@Override
protected ColumnStatistics getSqlResult(GetHelper<ColumnStatistics> ctx) throws MetaException {
return directSql.getTableStats(catName, dbName, tblName, colNames, enableBitVector);
@@ -8538,7 +8558,8 @@ public class ObjectStore implements RawStore, Configurable {
QueryWrapper queryWrapper = new QueryWrapper();
try {
- List<MTableColumnStatistics> mStats = getMTableColumnStatistics(getTable(), colNames, queryWrapper);
+ List<MTableColumnStatistics> mStats =
+ getMTableColumnStatistics(getTable(), colNames, queryWrapper);
if (mStats.isEmpty()) {
return null;
}
@@ -8568,6 +8589,35 @@ public class ObjectStore implements RawStore, Configurable {
catName, dbName, tableName, partNames, colNames, true, true);
}
+ @Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ // If any of the current partition stats in the metastore do not comply with
+ // the isolation level of the query, return null.
+ if (writeIdList != null) {
+ if (partNames == null || partNames.isEmpty()) {
+ LOG.warn("The given partNames list is null or empty.");
+ return null;
+ }
+ // Loop through the given "partNames" list
+ // checking isolation-level-compliance of each partition column stats.
+ for(String partName : partNames) {
+ MPartition mpart = getMPartition(catName, dbName, tableName, Warehouse.getPartValuesFromPartName(partName));
+ if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, -1, false)) {
+ LOG.debug("The current metastore transactional partition column statistics " +
+ "for " + dbName + "." + tableName + "." + mpart.getPartitionName() + " is not valid " +
+ "for the current query.");
+ return null;
+ }
+ }
+ }
+ return getPartitionColumnStatisticsInternal(
+ catName, dbName, tableName, partNames, colNames, true, true);
+ }
+
protected List<ColumnStatistics> getPartitionColumnStatisticsInternal(
String catName, String dbName, String tableName, final List<String> partNames, final List<String> colNames,
boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException {
@@ -8616,10 +8666,36 @@ public class ObjectStore implements RawStore, Configurable {
}.run(true);
}
+ @Override
+ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+ final List<String> partNames, final List<String> colNames,
+ long txnId, String writeIdList) throws MetaException, NoSuchObjectException {
+ // If the current stats in the metastore do not comply with
+ // the isolation level of the query, return null.
+ if (writeIdList != null) {
+ if (partNames == null || partNames.isEmpty()) {
+ LOG.warn("The given partNames list is null or empty.");
+ return null;
+ }
+ // Loop through the given "partNames" list
+ // checking isolation-level-compliance of each partition column stats.
+ for(String partName : partNames) {
+ MPartition mpart = getMPartition(catName, dbName, tblName, Warehouse.getPartValuesFromPartName(partName));
+ if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, -1, false)) {
+ LOG.debug("The current metastore transactional partition column statistics " +
+ "for " + dbName + "." + tblName + "." + mpart.getPartitionName() + " is not valid " +
+ "for the current query.");
+ return null;
+ }
+ }
+ }
+ return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+ }
@Override
public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
- final List<String> partNames, final List<String> colNames) throws MetaException, NoSuchObjectException {
+ final List<String> partNames, final List<String> colNames)
+ throws MetaException, NoSuchObjectException {
final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(),
ConfVars.STATS_NDV_DENSITY_FUNCTION);
final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
@@ -8651,7 +8727,8 @@ public class ObjectStore implements RawStore, Configurable {
throws MetaException, NoSuchObjectException {
final boolean enableBitVector =
MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR);
- return new GetHelper<List<MetaStoreUtils.ColStatsObjWithSourceInfo>>(catName, dbName, null, true, false) {
+ return new GetHelper<List<MetaStoreUtils.ColStatsObjWithSourceInfo>>(
+ catName, dbName, null, true, false) {
@Override
protected List<MetaStoreUtils.ColStatsObjWithSourceInfo> getSqlResult(
GetHelper<List<MetaStoreUtils.ColStatsObjWithSourceInfo>> ctx) throws MetaException {
@@ -12109,4 +12186,88 @@ public class ObjectStore implements RawStore, Configurable {
return ret;
}
+ /**
+ * Return true if the current statistics in the Metastore are valid
+ * for the query of the given "txnId" and "queryValidWriteIdList".
+ *
+ * Note that a statistics entity is valid iff
+ * the stats were written by the current query, or
+ * both of the following hold:
+ * ~ the COLUMN_STATS_ACCURATE (CSA) state is true
+ * ~ the stats are isolation-level (snapshot) compliant with the query
+ * @param tbl MTable of the stats entity
+ * @param txnId transaction id of the query
+ * @param queryValidWriteIdList valid writeId list of the query
+ * @Precondition "tbl" should be retrieved from the TBLS table.
+ */
+ private boolean isCurrentStatsValidForTheQuery(
+ MTable tbl, long txnId, String queryValidWriteIdList,
+ long statsWriteId, boolean checkConcurrentWrites)
+ throws MetaException {
+ return isCurrentStatsValidForTheQuery(tbl.getTxnId(), tbl.getParameters(), tbl.getWriteIdList(),
+ txnId, queryValidWriteIdList, statsWriteId, checkConcurrentWrites);
+ }
+
+ /**
+ * Return true if the current statistics in the Metastore are valid
+ * for the query of the given "txnId" and "queryValidWriteIdList".
+ *
+ * Note that a statistics entity is valid iff
+ * the stats were written by the current query, or
+ * both of the following hold:
+ * ~ the COLUMN_STATS_ACCURATE (CSA) state is true
+ * ~ the stats are isolation-level (snapshot) compliant with the query
+ * @param part MPartition of the stats entity
+ * @param txnId transaction id of the query
+ * @param queryValidWriteIdList valid writeId list of the query
+ * @Precondition "part" should be retrieved from the PARTITIONS table.
+ */
+ private boolean isCurrentStatsValidForTheQuery(
+ MPartition part, long txnId, String queryValidWriteIdList,
+ long statsWriteId, boolean checkConcurrentWrites)
+ throws MetaException {
+ return isCurrentStatsValidForTheQuery(part.getTxnId(), part.getParameters(), part.getWriteIdList(),
+ txnId, queryValidWriteIdList, statsWriteId, checkConcurrentWrites);
+ }
+
+ private boolean isCurrentStatsValidForTheQuery(
+ long statsTxnId, Map<String, String> statsParams, String statsWriteIdList,
+ long queryTxnId, String queryValidWriteIdList,
+ long statsWriteId, boolean checkConcurrentWrites)
+ throws MetaException {
+ // If the current query is the stats updater itself, return true here so that
+ // TxnIdUtils.checkEquivalentWriteIds() does not need to handle this special case.
+ if (statsTxnId == queryTxnId) {
+ return true;
+ }
+
+ // If the transaction that wrote the stats is still open or was aborted,
+ // the stats cannot be used and we return false.
+ try {
+ if (TxnDbUtil.isOpenOrAbortedTransaction(conf, statsTxnId)) {
+ return false;
+ }
+ } catch (Exception e) {
+ throw new MetaException("Cannot check transaction state.");
+ }
+
+ // This COLUMN_STATS_ACCURATE (CSA) check also covers the case where the stats were
+ // written by an aborted transaction whose TXNS entry has already been removed
+ // by compaction.
+ if (!StatsSetupConst.areBasicStatsUptoDate(statsParams)) {
+ return false;
+ }
+
+ // If NUM_FILES of the table/partition is 0, return true: a newly created, empty
+ // table has 0 for this parameter and its (empty) stats are trivially valid.
+ if (Long.parseLong(statsParams.get(StatsSetupConst.NUM_FILES)) == 0) {
+ return true;
+ }
+
+ ValidWriteIdList list4Stats = new ValidReaderWriteIdList(statsWriteIdList);
+ ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList);
+
+ return !checkConcurrentWrites ? TxnIdUtils.checkEquivalentWriteIds(list4Stats, list4TheQuery) :
+ !TxnIdUtils.areTheseConcurrentWrites(list4Stats, list4TheQuery, statsWriteId);
+ }
}
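A minimal sketch (not part of the patch) of the snapshot comparison that isCurrentStatsValidForTheQuery() ends with: the write-id list stored with the stats and the one carried by the query are both parsed and compared. The patch delegates the real equivalence test to TxnIdUtils.checkEquivalentWriteIds(list4Stats, list4TheQuery); the serialized string format used below is an assumption based on the usual ValidReaderWriteIdList encoding.

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public class WriteIdSnapshotSketch {
  public static void main(String[] args) {
    // Assumed serialized form: "<db.table>:<highWatermark>:<minOpenWriteId>:<openIds>:<abortedIds>".
    ValidWriteIdList list4Stats = new ValidReaderWriteIdList("default.t1:5:9223372036854775807::");
    ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList("default.t1:5:9223372036854775807::");
    // A shared high watermark (and no open/aborted ids) is the easy case in which the
    // stored stats can be reused by the querying transaction.
    boolean sameHighWatermark = list4Stats.getHighWatermark() == list4TheQuery.getHighWatermark();
    System.out.println("snapshots share a high watermark: " + sameHighWatermark);
  }
}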
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index bbbdf21..e1c1ab9 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -19,10 +19,7 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
@@ -34,59 +31,6 @@ import java.util.Map;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMPool;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
import org.apache.thrift.TException;
@@ -266,6 +210,20 @@ public interface RawStore extends Configurable {
Table getTable(String catalogName, String dbName, String tableName) throws MetaException;
/**
+ * Get a table object.
+ * @param catalogName catalog the table is in.
+ * @param dbName database the table is in.
+ * @param tableName table name.
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return table object, or null if no such table exists (wow it would be nice if we either
+ * consistently returned null or consistently threw NoSuchObjectException).
+ * @throws MetaException something went wrong in the RDBMS
+ */
+ Table getTable(String catalogName, String dbName, String tableName,
+ long txnId, String writeIdList) throws MetaException;
+
+ /**
* Add a partition.
* @param part partition to add
* @return true if the partition was successfully added.
@@ -317,6 +275,22 @@ public interface RawStore extends Configurable {
*/
Partition getPartition(String catName, String dbName, String tableName,
List<String> part_vals) throws MetaException, NoSuchObjectException;
+ /**
+ * Get a partition.
+ * @param catName catalog name.
+ * @param dbName database name.
+ * @param tableName table name.
+ * @param part_vals partition values for this table.
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return the partition.
+ * @throws MetaException error reading from RDBMS.
+ * @throws NoSuchObjectException no partition matching this specification exists.
+ */
+ Partition getPartition(String catName, String dbName, String tableName,
+ List<String> part_vals,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
/**
* Check whether a partition exists.
@@ -525,11 +499,14 @@ public interface RawStore extends Configurable {
* @param new_parts list of new partitions. The order must match the old partitions described in
* part_vals_list. Each of these should be a complete copy of the new
* partition, not just the pieces to update.
+ * @param txnId transaction id of the transaction that called this method.
+ * @param writeIdList valid write id list of the transaction on the current table
* @throws InvalidObjectException One of the indicated partitions does not exist.
* @throws MetaException error accessing the RDBMS.
*/
void alterPartitions(String catName, String db_name, String tbl_name,
- List<List<String>> part_vals_list, List<Partition> new_parts)
+ List<List<String>> part_vals_list, List<Partition> new_parts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException;
/**
@@ -901,6 +878,25 @@ public interface RawStore extends Configurable {
List<String> colName) throws MetaException, NoSuchObjectException;
/**
+ * Returns the relevant column statistics for a given column in a given table in a given database
+ * if such statistics exist.
+ * @param catName catalog name.
+ * @param dbName name of the database, defaults to current database
+ * @param tableName name of the table
+ * @param colName names of the columns for which statistics are requested
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return Relevant column statistics for the column for the given table
+ * @throws NoSuchObjectException No such table
+ * @throws MetaException error accessing the RDBMS
+ *
+ */
+ ColumnStatistics getTableColumnStatistics(
+ String catName, String dbName, String tableName,
+ List<String> colName, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
+ /**
* Get statistics for a partition for a set of columns.
* @param catName catalog name.
* @param dbName database name.
@@ -916,6 +912,25 @@ public interface RawStore extends Configurable {
throws MetaException, NoSuchObjectException;
/**
+ * Get statistics for a partition for a set of columns.
+ * @param catName catalog name.
+ * @param dbName database name.
+ * @param tblName table name.
+ * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...]
+ * @param colNames list of columns to get stats for
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return list of statistics objects
+ * @throws MetaException error accessing the RDBMS
+ * @throws NoSuchObjectException no such partition.
+ */
+ List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
+ /**
* Deletes column statistics if present associated with a given db, table, partition and col. If
* null is passed instead of a colName, stats when present for all columns associated
* with a given db, table and partition are deleted.
@@ -1159,6 +1174,25 @@ public interface RawStore extends Configurable {
List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
/**
+ * Get aggregated stats for a table or partition(s).
+ * @param catName catalog name.
+ * @param dbName database name.
+ * @param tblName table name.
+ * @param partNames list of partition names. These are the names of the partitions, not
+ * values.
+ * @param colNames list of column names
+ * @param txnId transaction id of the calling transaction
+ * @param writeIdList string format of valid writeId transaction list
+ * @return aggregated stats
+ * @throws MetaException error accessing RDBMS
+ * @throws NoSuchObjectException no such table or partition
+ */
+ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException;
+
+ /**
* Get column stats for all partitions of all tables in the database
* @param catName catalog name
* @param dbName database name
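Illustrative only: a caller-side sketch of the snapshot-aware RawStore overloads added above. The catalog, database, and table names are placeholders; passing txnId = -1 and writeIdList = null preserves the legacy behaviour, while a non-null writeIdList asks the store to verify isolation-level compliance before returning.

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

public class SnapshotAwareLookupSketch {
  // Fetch a table either with or without a snapshot, depending on whether the caller
  // holds a valid write-id list for it.
  static Table fetch(RawStore ms, String writeIdList, long txnId) throws MetaException {
    if (writeIdList == null) {
      return ms.getTable("hive", "default", "t1");                      // legacy path
    }
    return ms.getTable("hive", "default", "t1", txnId, writeIdList);    // snapshot-aware path
  }
}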
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 7c3588d..ad05051 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -49,68 +49,10 @@ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType;
import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
@@ -879,20 +821,29 @@ public class CachedStore implements RawStore, Configurable {
@Override
public Table getTable(String catName, String dbName, String tblName) throws MetaException {
+ return getTable(catName, dbName, tblName, -1, null);
+ }
+
+ // TODO: if writeIdList is not null, check isolation level compliance for SVS,
+ // possibly with getTableFromCache() with table snapshot in cache.
+ @Override
+ public Table getTable(String catName, String dbName, String tblName,
+ long txnId, String writeIdList)
+ throws MetaException {
catName = normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- return rawStore.getTable(catName, dbName, tblName);
+ return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
}
Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
- if (tbl == null) {
+ if (tbl == null || writeIdList != null) {
// This table is not yet loaded in cache
// If the prewarm thread is working on this table's database,
// let's move this table to the top of tblNamesBeingPrewarmed stack,
// so that it gets loaded to the cache faster and is available for subsequent requests
tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
- return rawStore.getTable(catName, dbName, tblName);
+ return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
}
if (tbl != null) {
tbl.unsetPrivileges();
@@ -955,16 +906,26 @@ public class CachedStore implements RawStore, Configurable {
@Override
public Partition getPartition(String catName, String dbName, String tblName, List<String> part_vals)
throws MetaException, NoSuchObjectException {
+ return getPartition(catName, dbName, tblName, part_vals, -1, null);
+ }
+
+ // TODO: the same as getTable()
+ @Override
+ public Partition getPartition(String catName, String dbName, String tblName,
+ List<String> part_vals, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
catName = normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- return rawStore.getPartition(catName, dbName, tblName, part_vals);
+ return rawStore.getPartition(
+ catName, dbName, tblName, part_vals, txnId, writeIdList);
}
Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
- if (part == null) {
+ if (part == null || writeIdList != null) {
// The table containing the partition is not yet loaded in cache
- return rawStore.getPartition(catName, dbName, tblName, part_vals);
+ return rawStore.getPartition(
+ catName, dbName, tblName, part_vals, txnId, writeIdList);
}
return part;
}
@@ -1204,15 +1165,17 @@ public class CachedStore implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbName, String tblName,
- List<List<String>> partValsList, List<Partition> newParts)
+ List<List<String>> partValsList, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
- rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+ rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tblName = normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
return;
}
+ // TODO: modify the following method for the case when writeIdList != null.
sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
}
@@ -1656,16 +1619,27 @@ public class CachedStore implements RawStore, Configurable {
@Override
public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
List<String> colNames) throws MetaException, NoSuchObjectException {
+ return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null);
+ }
+
+ // TODO: the same as getTable()
+ @Override
+ public ColumnStatistics getTableColumnStatistics(
+ String catName, String dbName, String tblName, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
catName = StringUtils.normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+ return rawStore.getTableColumnStatistics(
+ catName, dbName, tblName, colNames, txnId, writeIdList);
}
Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
- if (table == null) {
+ if (table == null || writeIdList != null) {
// The table is not yet loaded in cache
- return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+ return rawStore.getTableColumnStatistics(
+ catName, dbName, tblName, colNames, txnId, writeIdList);
}
ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
List<ColumnStatisticsObj> colStatObjs =
@@ -1723,6 +1697,15 @@ public class CachedStore implements RawStore, Configurable {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return rawStore.getPartitionColumnStatistics(
+ catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName,
List<String> partVals, String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -1743,17 +1726,28 @@ public class CachedStore implements RawStore, Configurable {
@Override
public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
List<String> colNames) throws MetaException, NoSuchObjectException {
+ return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null);
+ }
+
+ @Override
+ // TODO: the same as getTable() for transactional stats.
+ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+ List<String> partNames, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
List<ColumnStatisticsObj> colStats;
catName = normalizeIdentifier(catName);
dbName = StringUtils.normalizeIdentifier(dbName);
tblName = StringUtils.normalizeIdentifier(tblName);
if (!shouldCacheTable(catName, dbName, tblName)) {
- rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+ return rawStore.get_aggr_stats_for(
+ catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
}
Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
- if (table == null) {
+ if (table == null || writeIdList != null) {
// The table is not yet loaded in cache
- return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+ return rawStore.get_aggr_stats_for(
+ catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
}
List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
if (partNames.size() == allPartNames.size()) {
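The recurring pattern in the CachedStore changes above is that any call carrying a non-null writeIdList skips the shared cache and is answered by the underlying RawStore, because the cache does not yet keep per-table or per-partition snapshots (see the TODOs). A small, self-contained sketch of that dispatch rule, with a hypothetical Loader interface standing in for the shared cache and the raw store:

public class CacheBypassSketch {
  // Hypothetical stand-ins for sharedCache / rawStore lookups.
  interface Loader<T> {
    T fromCache(String tblName);
    T fromRawStore(String tblName);
  }

  static <T> T get(Loader<T> loader, String tblName, String writeIdList) {
    T cached = loader.fromCache(tblName);
    // Fall back to the RawStore when the entry is missing or when snapshot isolation
    // (writeIdList != null) has to be verified against the metastore backend.
    if (cached == null || writeIdList != null) {
      return loader.fromRawStore(tblName);
    }
    return cached;
  }
}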
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
index 4a97f89..56f9048 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
@@ -30,7 +30,8 @@ public class MPartition {
private int lastAccessTime;
private MStorageDescriptor sd;
private Map<String, String> parameters;
-
+ private long txnId;
+ private String writeIdList;
public MPartition() {}
@@ -152,4 +153,19 @@ public class MPartition {
this.createTime = createTime;
}
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
+
+ public String getWriteIdList() {
+ return writeIdList;
+ }
+
+ public void setWriteIdList(String writeIdList) {
+ this.writeIdList = writeIdList;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
index 50d9c5b..ff68eba 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java
@@ -55,6 +55,7 @@ public class MPartitionColumnStatistics {
private Long numTrues;
private Long numFalses;
private long lastAnalyzed;
+ private long txnId;
public MPartitionColumnStatistics() {}
@@ -278,4 +279,12 @@ public class MPartitionColumnStatistics {
public void setBitVector(byte[] bitVector) {
this.bitVector = bitVector;
}
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
index 38ad479..7ef1ef6 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
@@ -1,3 +1,4 @@
+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -37,6 +38,8 @@ public class MTable {
private String viewExpandedText;
private boolean rewriteEnabled;
private String tableType;
+ private long txnId;
+ private String writeIdList;
public MTable() {}
@@ -270,4 +273,20 @@ public class MTable {
public String getTableType() {
return tableType;
}
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
+
+ public String getWriteIdList() {
+ return writeIdList;
+ }
+
+ public void setWriteIdList(String writeIdList) {
+ this.writeIdList = writeIdList;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
index 731cd6f..9d687e4 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java
@@ -53,6 +53,7 @@ public class MTableColumnStatistics {
private Long numTrues;
private Long numFalses;
private long lastAnalyzed;
+ private long txnId;
public MTableColumnStatistics() {}
@@ -269,4 +270,12 @@ public class MTableColumnStatistics {
public void setBitVector(byte[] bitVector) {
this.bitVector = bitVector;
}
+
+ public long getTxnId() {
+ return txnId;
+ }
+
+ public void setTxnId(long txnId) {
+ this.txnId = txnId;
+ }
}
[37/67] [abbrv] hive git commit: HIVE-19569: alter table db1.t1
rename db2.t2 generates MetaStoreEventListener.onDropTable() (Mahesh Kumar
Behera, reviewed by Sankar Hariappan)
Posted by se...@apache.org.
HIVE-19569: alter table db1.t1 rename db2.t2 generates MetaStoreEventListener.onDropTable() (Mahesh Kumar Behera, reviewed by Sankar Hariappan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d60bc73a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d60bc73a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d60bc73a
Branch: refs/heads/master-txnstats
Commit: d60bc73afcc6e755d499976baa54661a9680ed54
Parents: 3eaca1f
Author: Sankar Hariappan <sa...@apache.org>
Authored: Sat Jun 16 23:27:24 2018 -0700
Committer: Sankar Hariappan <sa...@apache.org>
Committed: Sat Jun 16 23:27:24 2018 -0700
----------------------------------------------------------------------
.../hadoop/hive/ql/TestTxnConcatenate.java | 24 ++---
.../hadoop/hive/metastore/HiveAlterHandler.java | 92 ++++----------------
.../hadoop/hive/metastore/HiveMetaStore.java | 31 +++----
.../TestTablesCreateDropAlterTruncate.java | 2 +-
4 files changed, 45 insertions(+), 104 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/d60bc73a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
index 511198a..0e436e1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnConcatenate.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
@@ -225,14 +224,19 @@ public class TestTxnConcatenate extends TxnCommandsBaseForTests {
Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
"select count(*) from NEXT_WRITE_ID where NWI_TABLE='s'"));
- //this causes MetaStoreEvenListener.onDropTable()/onCreateTable() to execute and the data
- //files are just moved under new table. This can't work since a drop table in Acid removes
- //the relevant table metadata (like writeid, etc.), so writeIds in file names/ROW_IDs
- //no longer make sense. (In fact 'select ...' returns nothing since there is no NEXT_WRITE_ID
- //entry for the 'new' table and all existing data is 'above HWM'. see HIVE-19569
- CommandProcessorResponse cpr =
- runStatementOnDriverNegative("alter table mydb1.S RENAME TO mydb2.bar");
- Assert.assertTrue(cpr.getErrorMessage() != null && cpr.getErrorMessage()
- .contains("Changing database name of a transactional table mydb1.s is not supported."));
+ runStatementOnDriver("alter table mydb1.S RENAME TO mydb2.bar");
+
+ Assert.assertEquals(
+ TxnDbUtil.queryToString(hiveConf, "select * from COMPLETED_TXN_COMPONENTS"), 2,
+ TxnDbUtil.countQueryAgent(hiveConf,
+ "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_TABLE='bar'"));
+ Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+ "select count(*) from COMPACTION_QUEUE where CQ_TABLE='bar'"));
+ Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+ "select count(*) from WRITE_SET where WS_TABLE='bar'"));
+ Assert.assertEquals(2, TxnDbUtil.countQueryAgent(hiveConf,
+ "select count(*) from TXN_TO_WRITE_ID where T2W_TABLE='bar'"));
+ Assert.assertEquals(1, TxnDbUtil.countQueryAgent(hiveConf,
+ "select count(*) from NEXT_WRITE_ID where NWI_TABLE='bar'"));
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/d60bc73a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 33999d0..c2da6d3 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -23,11 +23,8 @@ import com.google.common.collect.Lists;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
import org.apache.hadoop.hive.metastore.messaging.EventMessage;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -103,7 +100,7 @@ public class HiveAlterHandler implements AlterHandler {
&& StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
StatsSetupConst.CASCADE));
if (newt == null) {
- throw new InvalidOperationException("New table is invalid: " + newt);
+ throw new InvalidOperationException("New table is null");
}
String newTblName = newt.getTableName().toLowerCase();
@@ -131,14 +128,9 @@ public class HiveAlterHandler implements AlterHandler {
List<TransactionalMetaStoreEventListener> transactionalListeners = null;
List<MetaStoreEventListener> listeners = null;
Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
- Map<String, String> txnDropTableEventResponses = Collections.emptyMap();
- Map<String, String> txnCreateTableEventResponses = Collections.emptyMap();
- Map<String, String> txnAddPartitionEventResponses = Collections.emptyMap();
- if (handler != null) {
- transactionalListeners = handler.getTransactionalListeners();
- listeners = handler.getListeners();
- }
+ transactionalListeners = handler.getTransactionalListeners();
+ listeners = handler.getListeners();
try {
boolean rename = false;
@@ -253,6 +245,14 @@ public class HiveAlterHandler implements AlterHandler {
" failed to move data due to: '" + getSimpleMessage(e)
+ "' See hive log file for details.");
}
+
+ if (!HiveMetaStore.isRenameAllowed(olddb, db)) {
+ LOG.error("Alter Table operation for " + TableName.getQualified(catName, dbname, name) +
+ "to new table = " + TableName.getQualified(catName, newDbName, newTblName) + " failed ");
+ throw new MetaException("Alter table not allowed for table " +
+ TableName.getQualified(catName, dbname, name) +
+ "to new table = " + TableName.getQualified(catName, newDbName, newTblName));
+ }
}
if (isPartitionedTable) {
@@ -349,29 +349,10 @@ public class HiveAlterHandler implements AlterHandler {
}
if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
- if (oldt.getDbName().equalsIgnoreCase(newt.getDbName())) {
- txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+ txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
EventMessage.EventType.ALTER_TABLE,
new AlterTableEvent(oldt, newt, false, true, handler),
environmentContext);
- } else {
- txnDropTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventMessage.EventType.DROP_TABLE,
- new DropTableEvent(oldt, true, false, handler),
- environmentContext);
- txnCreateTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventMessage.EventType.CREATE_TABLE,
- new CreateTableEvent(newt, true, handler),
- environmentContext);
- if (isPartitionedTable) {
- String cName = newt.isSetCatName() ? newt.getCatName() : DEFAULT_CATALOG_NAME;
- parts = msdb.getPartitions(cName, newt.getDbName(), newt.getTableName(), -1);
- txnAddPartitionEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventMessage.EventType.ADD_PARTITION,
- new AddPartitionEvent(newt, parts, true, handler),
- environmentContext);
- }
- }
}
// commit the changes
success = msdb.commitTransaction();
@@ -411,49 +392,12 @@ public class HiveAlterHandler implements AlterHandler {
}
if (!listeners.isEmpty()) {
- // An ALTER_TABLE event will be created for any alter table operation happening inside the same
- // database, otherwise a rename between databases is considered a DROP_TABLE from the old database
- // and a CREATE_TABLE in the new database plus ADD_PARTITION operations if needed.
- if (!success || dbname.equalsIgnoreCase(newDbName)) {
- // I don't think event notifications in case of failures are necessary, but other HMS operations
- // make this call whether the event failed or succeeded. To make this behavior consistent, then
- // this call will be made also for failed events even for renaming the table between databases
- // to avoid a large list of ADD_PARTITION unnecessary failed events.
- MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE,
- new AlterTableEvent(oldt, newt, false, success, handler),
- environmentContext, txnAlterTableEventResponses, msdb);
- } else {
- if(oldt.getParameters() != null && "true".equalsIgnoreCase(
- oldt.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) {
- /*Why does it split Alter into Drop + Create here????? This causes onDropTable logic
- * to wipe out acid related metadata and writeIds from old table don't make sense
- * in the new table.*/
- throw new IllegalStateException("Changing database name of a transactional table " +
- Warehouse.getQualifiedName(oldt) + " is not supported. Please use create-table-as" +
- " or create new table manually followed by Insert.");
- }
- MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.DROP_TABLE,
- new DropTableEvent(oldt, true, false, handler),
- environmentContext, txnDropTableEventResponses, msdb);
-
- MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.CREATE_TABLE,
- new CreateTableEvent(newt, true, handler),
- environmentContext, txnCreateTableEventResponses, msdb);
-
- if (isPartitionedTable) {
- try {
- List<Partition> parts = msdb.getPartitions(catName, newDbName, newTblName, -1);
- MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ADD_PARTITION,
- new AddPartitionEvent(newt, parts, true, handler),
- environmentContext, txnAddPartitionEventResponses, msdb);
- } catch (NoSuchObjectException e) {
- // Just log the error but not throw an exception as this post-commit event should
- // not cause the HMS operation to fail.
- LOG.error("ADD_PARTITION events for ALTER_TABLE rename operation cannot continue because the following " +
- "table was not found on the metastore: " + newDbName + "." + newTblName, e);
- }
- }
- }
+ // I don't think event notifications in case of failures are necessary, but other HMS operations
+ // make this call whether the event failed or succeeded. To make this behavior consistent,
+ // this call is made for failed events also.
+ MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE,
+ new AlterTableEvent(oldt, newt, false, success, handler),
+ environmentContext, txnAlterTableEventResponses, msdb);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/d60bc73a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index b456e40..e88f9a5 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -241,6 +241,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
}
+ public static boolean isRenameAllowed(Database srcDB, Database destDB) {
+ if (!srcDB.getName().equalsIgnoreCase(destDB.getName())) {
+ if (ReplChangeManager.isSourceOfReplication(srcDB) || ReplChangeManager.isSourceOfReplication(destDB)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
public static class HMSHandler extends FacebookBase implements IHMSHandler {
public static final Logger LOG = HiveMetaStore.LOG;
private final Configuration conf; // stores datastore (jpox) properties,
@@ -3894,19 +3903,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
return new Partition();
}
- private boolean isRenameAllowed(String catalogName, String srcDBName, String destDBName)
- throws MetaException, NoSuchObjectException {
- RawStore ms = getMS();
- if (!srcDBName.equalsIgnoreCase(destDBName)) {
- Database destDB = ms.getDatabase(catalogName, destDBName);
- Database srcDB = ms.getDatabase(catalogName, srcDBName);
- if (ReplChangeManager.isSourceOfReplication(srcDB) || ReplChangeManager.isSourceOfReplication(destDB)) {
- return false;
- }
- }
- return true;
- }
-
@Override
public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
String sourceDbName, String sourceTableName, String destDbName,
@@ -3995,7 +3991,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
}
}
- if (!isRenameAllowed(parsedDestDbName[CAT_NAME], parsedSourceDbName[DB_NAME], parsedDestDbName[DB_NAME])) {
+ Database srcDb = ms.getDatabase(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME]);
+ Database destDb = ms.getDatabase(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME]);
+ if (!isRenameAllowed(srcDb, destDb)) {
throw new MetaException("Exchange partition not allowed for " +
TableName.getQualified(parsedSourceDbName[CAT_NAME],
parsedSourceDbName[DB_NAME], sourceTableName) + " Dest db : " + destDbName);
@@ -5004,11 +5002,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
Exception ex = null;
try {
Table oldt = get_table_core(catName, dbname, name);
- if (!isRenameAllowed(catName, dbname, newTable.getDbName())) {
- throw new MetaException("Alter table not allowed for table " +
- TableName.getQualified(catName, dbname, name) +
- " new table = " + getCatalogQualifiedTableName(newTable));
- }
firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable,
envContext, this);
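For reference, a compact sketch of the rename guard this commit moves into HiveMetaStore.isRenameAllowed(Database, Database): a cross-database rename is refused when either database participates in replication. The parameter key used below ("repl.source.for") is an assumption about what ReplChangeManager.isSourceOfReplication() inspects; the patch itself calls that method directly.

import org.apache.hadoop.hive.metastore.api.Database;

public class RenameGuardSketch {
  // Assumed stand-in for ReplChangeManager.isSourceOfReplication(db).
  static boolean isSourceOfReplication(Database db) {
    return db.getParameters() != null && db.getParameters().containsKey("repl.source.for");
  }

  // Mirrors the isRenameAllowed(Database, Database) check introduced by this commit.
  static boolean isRenameAllowed(Database srcDB, Database destDB) {
    if (!srcDB.getName().equalsIgnoreCase(destDB.getName())
        && (isSourceOfReplication(srcDB) || isSourceOfReplication(destDB))) {
      return false;
    }
    return true;
  }
}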
http://git-wip-us.apache.org/repos/asf/hive/blob/d60bc73a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index 7ad8053..e1c3dcb 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -895,7 +895,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
}
}
- @Test(expected = InvalidOperationException.class)
+ @Test(expected = MetaException.class)
public void testAlterTableNullDatabaseInNew() throws Exception {
Table originalTable = testTables[0];
Table newTable = originalTable.deepCopy();
[32/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader
after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_acid.q.out b/ql/src/test/results/clientpositive/llap/llap_acid.q.out
index 6196efe..635f928 100644
--- a/ql/src/test/results/clientpositive/llap/llap_acid.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_acid.q.out
@@ -124,8 +124,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -269,8 +269,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -378,8 +378,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_acid2.q.out b/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
index 4d74a17..c3e9c2a 100644
--- a/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_acid2.q.out
@@ -16,8 +16,10 @@ PREHOOK: query: CREATE TABLE orc_llap_n2 (
cfloat1 FLOAT,
cdouble1 DOUBLE,
cstring1 string,
- cfloat2 float
-) stored as orc TBLPROPERTIES ('transactional'='true')
+ cfloat2 float,
+ cdecimal1 decimal(10,3),
+ cdecimal2 decimal(38,10)
+) stored as orc TBLPROPERTIES ('transactional'='true','orc.write.format'='UNSTABLE-PRE-2.0')
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_llap_n2
@@ -35,8 +37,10 @@ POSTHOOK: query: CREATE TABLE orc_llap_n2 (
cfloat1 FLOAT,
cdouble1 DOUBLE,
cstring1 string,
- cfloat2 float
-) stored as orc TBLPROPERTIES ('transactional'='true')
+ cfloat2 float,
+ cdecimal1 decimal(10,3),
+ cdecimal2 decimal(38,10)
+) stored as orc TBLPROPERTIES ('transactional'='true','orc.write.format'='UNSTABLE-PRE-2.0')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_llap_n2
@@ -44,7 +48,8 @@ PREHOOK: query: insert into table orc_llap_n2
select cint, cbigint, cfloat, cdouble,
cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc limit 30
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30
PREHOOK: type: QUERY
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: default@orc_llap_n2
@@ -52,13 +57,58 @@ POSTHOOK: query: insert into table orc_llap_n2
select cint, cbigint, cfloat, cdouble,
cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc limit 30
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: default@orc_llap_n2
POSTHOOK: Lineage: orc_llap_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
POSTHOOK: Lineage: orc_llap_n2.cbigint0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
POSTHOOK: Lineage: orc_llap_n2.cbigint1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cdecimal1 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n2.cdecimal2 EXPRESSION []
+POSTHOOK: Lineage: orc_llap_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cdouble0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cdouble1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cfloat0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cfloat1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cfloat2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cint0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cint1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: alter table orc_llap_n2 set TBLPROPERTIES ('transactional'='true','orc.write.format'='0.12')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@orc_llap_n2
+PREHOOK: Output: default@orc_llap_n2
+POSTHOOK: query: alter table orc_llap_n2 set TBLPROPERTIES ('transactional'='true','orc.write.format'='0.12')
+POSTHOOK: type: ALTERTABLE_PROPERTIES
+POSTHOOK: Input: default@orc_llap_n2
+POSTHOOK: Output: default@orc_llap_n2
+PREHOOK: query: insert into table orc_llap_n2
+select cint, cbigint, cfloat, cdouble,
+ cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
+ cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
+ cstring1, cfloat as c9, cast("3.321" as decimal(10,3))as c10,
+ cast("9.987654321" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@orc_llap_n2
+POSTHOOK: query: insert into table orc_llap_n2
+select cint, cbigint, cfloat, cdouble,
+ cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
+ cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
+ cstring1, cfloat as c9, cast("3.321" as decimal(10,3))as c10,
+ cast("9.987654321" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@orc_llap_n2
+POSTHOOK: Lineage: orc_llap_n2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cbigint0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cbigint1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n2.cdecimal1 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n2.cdecimal2 EXPRESSION []
POSTHOOK: Lineage: orc_llap_n2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
POSTHOOK: Lineage: orc_llap_n2.cdouble0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
POSTHOOK: Lineage: orc_llap_n2.cdouble1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
@@ -84,8 +134,10 @@ PREHOOK: query: CREATE TABLE orc_llap2 (
cfloat1 FLOAT,
cdouble1 DOUBLE,
cstring1 string,
- cfloat2 float
-) stored as orc TBLPROPERTIES ('transactional'='false')
+ cfloat2 float,
+ cdecimal1 decimal(10,3),
+ cdecimal2 decimal(38,10)
+) stored as orc TBLPROPERTIES ('transactional'='false', 'orc.write.format'='UNSTABLE-PRE-2.0')
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orc_llap2
@@ -103,8 +155,10 @@ POSTHOOK: query: CREATE TABLE orc_llap2 (
cfloat1 FLOAT,
cdouble1 DOUBLE,
cstring1 string,
- cfloat2 float
-) stored as orc TBLPROPERTIES ('transactional'='false')
+ cfloat2 float,
+ cdecimal1 decimal(10,3),
+ cdecimal2 decimal(38,10)
+) stored as orc TBLPROPERTIES ('transactional'='false', 'orc.write.format'='UNSTABLE-PRE-2.0')
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orc_llap2
@@ -112,7 +166,8 @@ PREHOOK: query: insert into table orc_llap2
select cint, cbigint, cfloat, cdouble,
cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc limit 30
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30
PREHOOK: type: QUERY
PREHOOK: Input: default@alltypesorc
PREHOOK: Output: default@orc_llap2
@@ -120,13 +175,16 @@ POSTHOOK: query: insert into table orc_llap2
select cint, cbigint, cfloat, cdouble,
cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
- cstring1, cfloat as c9 from alltypesorc order by cdouble asc limit 30
+ cstring1, cfloat as c9, cast("1.123" as decimal(10,3))as c10,
+ cast("1.123456789" as decimal(38,18)) as c11 from alltypesorc order by cdouble asc limit 30
POSTHOOK: type: QUERY
POSTHOOK: Input: default@alltypesorc
POSTHOOK: Output: default@orc_llap2
POSTHOOK: Lineage: orc_llap2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
POSTHOOK: Lineage: orc_llap2.cbigint0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
POSTHOOK: Lineage: orc_llap2.cbigint1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap2.cdecimal1 SIMPLE []
+POSTHOOK: Lineage: orc_llap2.cdecimal2 EXPRESSION []
POSTHOOK: Lineage: orc_llap2.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
POSTHOOK: Lineage: orc_llap2.cdouble0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
POSTHOOK: Lineage: orc_llap2.cdouble1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
@@ -138,19 +196,21 @@ POSTHOOK: Lineage: orc_llap2.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(n
POSTHOOK: Lineage: orc_llap2.cint0 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
POSTHOOK: Lineage: orc_llap2.cint1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
POSTHOOK: Lineage: orc_llap2.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
-PREHOOK: query: alter table orc_llap2 set TBLPROPERTIES ('transactional'='true')
+PREHOOK: query: alter table orc_llap2 set TBLPROPERTIES ('transactional'='true','orc.write.format'='0.12')
PREHOOK: type: ALTERTABLE_PROPERTIES
PREHOOK: Input: default@orc_llap2
PREHOOK: Output: default@orc_llap2
-POSTHOOK: query: alter table orc_llap2 set TBLPROPERTIES ('transactional'='true')
+POSTHOOK: query: alter table orc_llap2 set TBLPROPERTIES ('transactional'='true','orc.write.format'='0.12')
POSTHOOK: type: ALTERTABLE_PROPERTIES
POSTHOOK: Input: default@orc_llap2
POSTHOOK: Output: default@orc_llap2
-PREHOOK: query: update orc_llap2 set cstring1 = 'testvalue' where cstring1 = 'N016jPED08o'
+PREHOOK: query: update orc_llap2 set cstring1 = 'testvalue', cdecimal1 = cast("3.321" as decimal(10,3)),
+cdecimal2 = cast("9.987654321" as decimal(38,18)) where cstring1 = 'N016jPED08o'
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_llap2
PREHOOK: Output: default@orc_llap2
-POSTHOOK: query: update orc_llap2 set cstring1 = 'testvalue' where cstring1 = 'N016jPED08o'
+POSTHOOK: query: update orc_llap2 set cstring1 = 'testvalue', cdecimal1 = cast("3.321" as decimal(10,3)),
+cdecimal2 = cast("9.987654321" as decimal(38,18)) where cstring1 = 'N016jPED08o'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_llap2
POSTHOOK: Output: default@orc_llap2
@@ -192,6 +252,36 @@ xTlDv24JYv4s
7wH3hBKdO55Xq3gEEe0
5QLs0LVK1g
ET3d4F2I4lV
+N016jPED08o
+Q1JAdUlCVORmR0Q5X5Vf5u6
+eNsh5tYa
+5j7GJ8OCXgMVIcK7
+uJGHsW3cd073NGFITyQ
+G1u0pUmU6ehCm
+mk6lShdOa8kXT8i7mLd3fK
+u5C7glqT5XqtO0JE2686lk1
+h4omSc1jcLLwW
+tFY2ng51v
+vmAT10eeE47fgH20pLi
+uN803aW
+qqbDw46IgGds4
+32v414p63Jv1B4tO1xy
+73xdw4X
+d3o1712a03n20qvi62U7
+eQ80MW0h728I204P87YXc
+KHtD2A2hp6OjFgS73gdgE
+nI30tm7U55O0gI
+LSJtFA66
+mby00c
+meGb5
+pM6Gt05s1YJeii
+LR2AKy0dPt8vFdIV5760jriw
+1B3WMD5LSk65B2Moa
+xTlDv24JYv4s
+28Oe6r21yux7Lk47
+7wH3hBKdO55Xq3gEEe0
+5QLs0LVK1g
+ET3d4F2I4lV
PREHOOK: query: select cfloat2, cint from orc_llap_n2
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_llap_n2
@@ -230,6 +320,36 @@ NULL -899422227
11.0 385623629
11.0 681126962
11.0 25892751
+NULL -838810013
+NULL 246423894
+NULL 708885482
+NULL 186967185
+NULL -595277064
+NULL 584923170
+NULL 518213127
+NULL -334595454
+NULL 241008004
+NULL 185212032
+NULL -738747840
+NULL -971543377
+NULL 940448896
+NULL -324030556
+NULL -899422227
+11.0 835111400
+11.0 -775326158
+11.0 653630202
+11.0 779427499
+11.0 797003983
+11.0 31832752
+11.0 783790031
+11.0 -898241885
+11.0 NULL
+11.0 -646295381
+11.0 130912195
+11.0 -391573084
+11.0 385623629
+11.0 681126962
+11.0 25892751
PREHOOK: query: select * from orc_llap_n2
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_llap_n2
@@ -238,36 +358,66 @@ POSTHOOK: query: select * from orc_llap_n2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_llap_n2
#### A masked pattern was here ####
--838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL N016jPED08o NULL
-246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL Q1JAdUlCVORmR0Q5X5Vf5u6 NULL
-708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL eNsh5tYa NULL
-186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 5j7GJ8OCXgMVIcK7 NULL
--595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL uJGHsW3cd073NGFITyQ NULL
-584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL G1u0pUmU6ehCm NULL
-518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL mk6lShdOa8kXT8i7mLd3fK NULL
--334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL u5C7glqT5XqtO0JE2686lk1 NULL
-241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL h4omSc1jcLLwW NULL
-185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL tFY2ng51v NULL
--738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi NULL
--971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL uN803aW NULL
-940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL qqbDw46IgGds4 NULL
--324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL 32v414p63Jv1B4tO1xy NULL
--899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X NULL
-835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL d3o1712a03n20qvi62U7 11.0
--775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL eQ80MW0h728I204P87YXc 11.0
-653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL KHtD2A2hp6OjFgS73gdgE 11.0
-779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL nI30tm7U55O0gI 11.0
-797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL LSJtFA66 11.0
-31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL mby00c 11.0
-783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL meGb5 11.0
--898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL pM6Gt05s1YJeii 11.0
-NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL LR2AKy0dPt8vFdIV5760jriw 11.0
--646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL 1B3WMD5LSk65B2Moa 11.0
-130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL xTlDv24JYv4s 11.0
--391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL 28Oe6r21yux7Lk47 11.0
-385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 7wH3hBKdO55Xq3gEEe0 11.0
-681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 5QLs0LVK1g 11.0
-25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL ET3d4F2I4lV 11.0
+-838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL N016jPED08o NULL 3.321 9.9876543210
+246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL Q1JAdUlCVORmR0Q5X5Vf5u6 NULL 3.321 9.9876543210
+708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL eNsh5tYa NULL 3.321 9.9876543210
+186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 5j7GJ8OCXgMVIcK7 NULL 3.321 9.9876543210
+-595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL uJGHsW3cd073NGFITyQ NULL 3.321 9.9876543210
+584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL G1u0pUmU6ehCm NULL 3.321 9.9876543210
+518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL mk6lShdOa8kXT8i7mLd3fK NULL 3.321 9.9876543210
+-334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL u5C7glqT5XqtO0JE2686lk1 NULL 3.321 9.9876543210
+241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL h4omSc1jcLLwW NULL 3.321 9.9876543210
+185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL tFY2ng51v NULL 3.321 9.9876543210
+-738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi NULL 3.321 9.9876543210
+-971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL uN803aW NULL 3.321 9.9876543210
+940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL qqbDw46IgGds4 NULL 3.321 9.9876543210
+-324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL 32v414p63Jv1B4tO1xy NULL 3.321 9.9876543210
+-899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X NULL 3.321 9.9876543210
+835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL d3o1712a03n20qvi62U7 11.0 3.321 9.9876543210
+-775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL eQ80MW0h728I204P87YXc 11.0 3.321 9.9876543210
+653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL KHtD2A2hp6OjFgS73gdgE 11.0 3.321 9.9876543210
+779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL nI30tm7U55O0gI 11.0 3.321 9.9876543210
+797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL LSJtFA66 11.0 3.321 9.9876543210
+31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL mby00c 11.0 3.321 9.9876543210
+783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL meGb5 11.0 3.321 9.9876543210
+-898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL pM6Gt05s1YJeii 11.0 3.321 9.9876543210
+NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL LR2AKy0dPt8vFdIV5760jriw 11.0 3.321 9.9876543210
+-646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL 1B3WMD5LSk65B2Moa 11.0 3.321 9.9876543210
+130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL xTlDv24JYv4s 11.0 3.321 9.9876543210
+-391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL 28Oe6r21yux7Lk47 11.0 3.321 9.9876543210
+385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 7wH3hBKdO55Xq3gEEe0 11.0 3.321 9.9876543210
+681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 5QLs0LVK1g 11.0 3.321 9.9876543210
+25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL ET3d4F2I4lV 11.0 3.321 9.9876543210
+-838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL N016jPED08o NULL 1.123 1.1234567890
+246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL Q1JAdUlCVORmR0Q5X5Vf5u6 NULL 1.123 1.1234567890
+708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL eNsh5tYa NULL 1.123 1.1234567890
+186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 5j7GJ8OCXgMVIcK7 NULL 1.123 1.1234567890
+-595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL uJGHsW3cd073NGFITyQ NULL 1.123 1.1234567890
+584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL G1u0pUmU6ehCm NULL 1.123 1.1234567890
+518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL mk6lShdOa8kXT8i7mLd3fK NULL 1.123 1.1234567890
+-334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL u5C7glqT5XqtO0JE2686lk1 NULL 1.123 1.1234567890
+241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL h4omSc1jcLLwW NULL 1.123 1.1234567890
+185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL tFY2ng51v NULL 1.123 1.1234567890
+-738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi NULL 1.123 1.1234567890
+-971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL uN803aW NULL 1.123 1.1234567890
+940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL qqbDw46IgGds4 NULL 1.123 1.1234567890
+-324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL 32v414p63Jv1B4tO1xy NULL 1.123 1.1234567890
+-899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X NULL 1.123 1.1234567890
+835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL d3o1712a03n20qvi62U7 11.0 1.123 1.1234567890
+-775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL eQ80MW0h728I204P87YXc 11.0 1.123 1.1234567890
+653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL KHtD2A2hp6OjFgS73gdgE 11.0 1.123 1.1234567890
+779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL nI30tm7U55O0gI 11.0 1.123 1.1234567890
+797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL LSJtFA66 11.0 1.123 1.1234567890
+31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL mby00c 11.0 1.123 1.1234567890
+783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL meGb5 11.0 1.123 1.1234567890
+-898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL pM6Gt05s1YJeii 11.0 1.123 1.1234567890
+NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL LR2AKy0dPt8vFdIV5760jriw 11.0 1.123 1.1234567890
+-646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL 1B3WMD5LSk65B2Moa 11.0 1.123 1.1234567890
+130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL xTlDv24JYv4s 11.0 1.123 1.1234567890
+-391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL 28Oe6r21yux7Lk47 11.0 1.123 1.1234567890
+385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 7wH3hBKdO55Xq3gEEe0 11.0 1.123 1.1234567890
+681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 5QLs0LVK1g 11.0 1.123 1.1234567890
+25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL ET3d4F2I4lV 11.0 1.123 1.1234567890
PREHOOK: query: select cstring1 from orc_llap2
PREHOOK: type: QUERY
PREHOOK: Input: default@orc_llap2
@@ -352,36 +502,36 @@ POSTHOOK: query: select * from orc_llap2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orc_llap2
#### A masked pattern was here ####
-246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL Q1JAdUlCVORmR0Q5X5Vf5u6 NULL
-708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL eNsh5tYa NULL
-186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 5j7GJ8OCXgMVIcK7 NULL
--595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL uJGHsW3cd073NGFITyQ NULL
-584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL G1u0pUmU6ehCm NULL
-518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL mk6lShdOa8kXT8i7mLd3fK NULL
--334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL u5C7glqT5XqtO0JE2686lk1 NULL
-241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL h4omSc1jcLLwW NULL
-185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL tFY2ng51v NULL
--738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi NULL
--971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL uN803aW NULL
-940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL qqbDw46IgGds4 NULL
--324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL 32v414p63Jv1B4tO1xy NULL
--899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X NULL
-835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL d3o1712a03n20qvi62U7 11.0
--775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL eQ80MW0h728I204P87YXc 11.0
-653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL KHtD2A2hp6OjFgS73gdgE 11.0
-779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL nI30tm7U55O0gI 11.0
-797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL LSJtFA66 11.0
-31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL mby00c 11.0
-783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL meGb5 11.0
--898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL pM6Gt05s1YJeii 11.0
-NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL LR2AKy0dPt8vFdIV5760jriw 11.0
--646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL 1B3WMD5LSk65B2Moa 11.0
-130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL xTlDv24JYv4s 11.0
--391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL 28Oe6r21yux7Lk47 11.0
-385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 7wH3hBKdO55Xq3gEEe0 11.0
-681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 5QLs0LVK1g 11.0
-25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL ET3d4F2I4lV 11.0
--838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL testvalue NULL
+246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL 246423894 -1645852809 NULL NULL Q1JAdUlCVORmR0Q5X5Vf5u6 NULL 1.123 1.1234567890
+708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL 708885482 -1645852809 NULL NULL eNsh5tYa NULL 1.123 1.1234567890
+186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 186967185 -1645852809 NULL NULL 5j7GJ8OCXgMVIcK7 NULL 1.123 1.1234567890
+-595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL -595277064 -1645852809 NULL NULL uJGHsW3cd073NGFITyQ NULL 1.123 1.1234567890
+584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL 584923170 -1645852809 NULL NULL G1u0pUmU6ehCm NULL 1.123 1.1234567890
+518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL 518213127 -1645852809 NULL NULL mk6lShdOa8kXT8i7mLd3fK NULL 1.123 1.1234567890
+-334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL -334595454 -1645852809 NULL NULL u5C7glqT5XqtO0JE2686lk1 NULL 1.123 1.1234567890
+241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL 241008004 -1645852809 NULL NULL h4omSc1jcLLwW NULL 1.123 1.1234567890
+185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL 185212032 -1645852809 NULL NULL tFY2ng51v NULL 1.123 1.1234567890
+-738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL -738747840 -1645852809 NULL NULL vmAT10eeE47fgH20pLi NULL 1.123 1.1234567890
+-971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL -971543377 -1645852809 NULL NULL uN803aW NULL 1.123 1.1234567890
+940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL 940448896 -1645852809 NULL NULL qqbDw46IgGds4 NULL 1.123 1.1234567890
+-324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL -324030556 -1645852809 NULL NULL 32v414p63Jv1B4tO1xy NULL 1.123 1.1234567890
+-899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL -899422227 -1645852809 NULL NULL 73xdw4X NULL 1.123 1.1234567890
+835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL 835111400 1964238982 11.0 NULL d3o1712a03n20qvi62U7 11.0 1.123 1.1234567890
+-775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL -775326158 -1289793978 11.0 NULL eQ80MW0h728I204P87YXc 11.0 1.123 1.1234567890
+653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL 653630202 1281184487 11.0 NULL KHtD2A2hp6OjFgS73gdgE 11.0 1.123 1.1234567890
+779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL 779427499 1326393090 11.0 NULL nI30tm7U55O0gI 11.0 1.123 1.1234567890
+797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL 797003983 1186689849 11.0 NULL LSJtFA66 11.0 1.123 1.1234567890
+31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL 31832752 1854212271 11.0 NULL mby00c 11.0 1.123 1.1234567890
+783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL 783790031 -1482854823 11.0 NULL meGb5 11.0 1.123 1.1234567890
+-898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL -898241885 -1785664982 11.0 NULL pM6Gt05s1YJeii 11.0 1.123 1.1234567890
+NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL NULL -1083386085 11.0 NULL LR2AKy0dPt8vFdIV5760jriw 11.0 1.123 1.1234567890
+-646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL -646295381 -1654635859 11.0 NULL 1B3WMD5LSk65B2Moa 11.0 1.123 1.1234567890
+130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL 130912195 -1286145901 11.0 NULL xTlDv24JYv4s 11.0 1.123 1.1234567890
+-391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL -391573084 -236100834 11.0 NULL 28Oe6r21yux7Lk47 11.0 1.123 1.1234567890
+385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 385623629 236101682 11.0 NULL 7wH3hBKdO55Xq3gEEe0 11.0 1.123 1.1234567890
+681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 681126962 993392163 11.0 NULL 5QLs0LVK1g 11.0 1.123 1.1234567890
+25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL 25892751 -1978674520 11.0 NULL ET3d4F2I4lV 11.0 1.123 1.1234567890
+-838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL -838810013 1864027286 NULL NULL testvalue NULL 3.321 9.9876543210
PREHOOK: query: DROP TABLE orc_llap_n2
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@orc_llap_n2
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_acid_fast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_acid_fast.q.out b/ql/src/test/results/clientpositive/llap/llap_acid_fast.q.out
index 37c213b..c4dc6f7 100644
--- a/ql/src/test/results/clientpositive/llap/llap_acid_fast.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_acid_fast.q.out
@@ -118,8 +118,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -263,8 +263,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
@@ -372,8 +372,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: true
usesVectorUDFAdaptor: false
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out b/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out
new file mode 100644
index 0000000..0041206
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/llap_decimal64_reader.q.out
@@ -0,0 +1,303 @@
+PREHOOK: query: DROP TABLE orc_llap_n0
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_llap_n0
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orc_llap_n0(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN,
+ cdecimal1 decimal(10,2),
+ cdecimal2 decimal(38,5))
+ STORED AS ORC tblproperties ("orc.compress"="NONE")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: CREATE TABLE orc_llap_n0(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN,
+ cdecimal1 decimal(10,2),
+ cdecimal2 decimal(38,5))
+ STORED AS ORC tblproperties ("orc.compress"="NONE")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_llap_n0
+PREHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("3.345" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@orc_llap_n0
+POSTHOOK: Lineage: orc_llap_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cdecimal1 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdecimal2 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: alter table orc_llap_n0 set tblproperties ("orc.compress"="NONE", 'orc.write.format'='UNSTABLE-PRE-2.0')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@orc_llap_n0
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: alter table orc_llap_n0 set tblproperties ("orc.compress"="NONE", 'orc.write.format'='UNSTABLE-PRE-2.0')
+POSTHOOK: type: ALTERTABLE_PROPERTIES
+POSTHOOK: Input: default@orc_llap_n0
+POSTHOOK: Output: default@orc_llap_n0
+PREHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("4.456" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: insert into table orc_llap_n0
+select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2,
+ cast("4.456" as decimal(10,2)), cast("5.56789" as decimal(38,5)) from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@orc_llap_n0
+POSTHOOK: Lineage: orc_llap_n0.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cdecimal1 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdecimal2 SIMPLE []
+POSTHOOK: Lineage: orc_llap_n0.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_llap_n0.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: explain vectorization select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+ enabled: true
+ enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_llap_n0
+ filterExpr: ((cdecimal1 = 3.35) or (cdecimal1 = 4.46)) (type: boolean)
+ Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: ((cdecimal1 = 3.35) or (cdecimal1 = 4.46)) (type: boolean)
+ Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ keys: cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ TopN Hash Memory Usage: 0.1
+ Execution mode: vectorized, llap
+ LLAP IO: all inputs
+ Map Vectorization:
+ enabled: true
+ enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
+ inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ allNative: false
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Vectorization:
+ enabled: true
+ enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+ allNative: false
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: decimal(10,2)), KEY._col1 (type: decimal(38,5))
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ Limit
+ Number of rows: 2
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 2
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_llap_n0
+#### A masked pattern was here ####
+POSTHOOK: query: select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_llap_n0
+#### A masked pattern was here ####
+4.46 5.56789
+3.35 5.56789
+PREHOOK: query: explain vectorization select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+ enabled: true
+ enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_llap_n0
+ filterExpr: ((cdecimal1 = 3.35) or (cdecimal1 = 4.46)) (type: boolean)
+ Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: ((cdecimal1 = 3.35) or (cdecimal1 = 4.46)) (type: boolean)
+ Statistics: Num rows: 24576 Data size: 5505024 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ keys: cdecimal1 (type: decimal(10,2)), cdecimal2 (type: decimal(38,5))
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: decimal(10,2)), _col1 (type: decimal(38,5))
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ TopN Hash Memory Usage: 0.1
+ Execution mode: vectorized, llap
+ LLAP IO: all inputs
+ Map Vectorization:
+ enabled: true
+ enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+ inputFormatFeatureSupport: [DECIMAL_64]
+ vectorizationSupportRemovedReasons: [[] is disabled because it is not in hive.vectorized.input.format.supports.enabled []]
+ featureSupportInUse: []
+ inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ allNative: false
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Vectorization:
+ enabled: true
+ enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+ allNative: false
+ usesVectorUDFAdaptor: false
+ vectorized: true
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: decimal(10,2)), KEY._col1 (type: decimal(38,5))
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ Limit
+ Number of rows: 2
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 448 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 2
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_llap_n0
+#### A masked pattern was here ####
+POSTHOOK: query: select cdecimal1,cdecimal2 from orc_llap_n0 where cdecimal1 = cast("3.345" as decimal(10,2)) or cdecimal1 = cast("4.456" as decimal(10,2))
+ group by cdecimal1,cdecimal2 limit 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_llap_n0
+#### A masked pattern was here ####
+4.46 5.56789
+3.35 5.56789
+PREHOOK: query: DROP TABLE orc_llap_n0
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_llap_n0
+PREHOOK: Output: default@orc_llap_n0
+POSTHOOK: query: DROP TABLE orc_llap_n0
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_llap_n0
+POSTHOOK: Output: default@orc_llap_n0
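For context on the [DECIMAL_64] flag that these plans toggle: decimals with precision up to 18, such as the decimal(10,2) column above, can travel through the vectorized reader as scaled 64-bit longs instead of full HiveDecimal objects; the second explain output shows the flag being dropped when hive.vectorized.input.format.supports.enabled does not list decimal_64. A minimal, self-contained sketch of that scaled-long representation (plain Java, no Hive classes; the class name is purely illustrative, not part of this patch):

    import java.math.BigDecimal;

    // Sketch of the DECIMAL_64 idea: a decimal(10,2) value such as 3.35 is held
    // as the unscaled 64-bit long 335 together with a fixed scale of 2.
    public class ScaledLongDecimalSketch {
      public static void main(String[] args) {
        int scale = 2;             // scale of cdecimal1 decimal(10,2)
        long unscaled = 335L;      // 3.35 stored as a scaled long
        BigDecimal value = BigDecimal.valueOf(unscaled, scale);
        System.out.println(value); // prints 3.35, matching the query results above
      }
    }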
http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out b/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
index faab23c..e6fa1ac 100644
--- a/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_partitioned.q.out
@@ -1679,8 +1679,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -1745,8 +1745,8 @@ STAGE PLANS:
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
- inputFormatFeatureSupport: []
- featureSupportInUse: []
+ inputFormatFeatureSupport: [DECIMAL_64]
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
@@ -2127,8 +2127,7 @@ STAGE PLANS:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
- vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
- featureSupportInUse: []
+ featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: true
[49/67] [abbrv] hive git commit: HIVE-19921: Fix perf duration and queue name in HiveProtoLoggingHook (Harish JP, reviewed by Anishek Agarwal)
Posted by se...@apache.org.
HIVE-19921: Fix perf duration and queue name in HiveProtoLoggingHook (Harish JP, reviewed by Anishek Agarwal)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c4eb647c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c4eb647c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c4eb647c
Branch: refs/heads/master-txnstats
Commit: c4eb647c6ffffda499541ce178bf82433c26f25e
Parents: 4810511
Author: Anishek Agarwal <an...@gmail.com>
Authored: Mon Jun 18 09:08:34 2018 -0700
Committer: Anishek Agarwal <an...@gmail.com>
Committed: Mon Jun 18 09:08:34 2018 -0700
----------------------------------------------------------------------
.../hive/ql/hooks/HiveProtoLoggingHook.java | 6 +++-
.../hive/ql/hooks/TestHiveProtoLoggingHook.java | 29 +++++++++++++++-----
2 files changed, 27 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/c4eb647c/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
index eef6ac9..bddca1a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java
@@ -366,6 +366,7 @@ public class HiveProtoLoggingHook implements ExecuteWithHookContext {
ApplicationId llapId = determineLlapId(conf, executionMode);
if (llapId != null) {
addMapEntry(builder, OtherInfoType.LLAP_APP_ID, llapId.toString());
+ builder.setQueue(conf.get(HiveConf.ConfVars.LLAP_DAEMON_QUEUE_NAME.varname));
}
conf.stripHiddenConfigurations(conf);
@@ -391,7 +392,10 @@ public class HiveProtoLoggingHook implements ExecuteWithHookContext {
builder.setOperationId(hookContext.getOperationId());
}
addMapEntry(builder, OtherInfoType.STATUS, Boolean.toString(success));
- JSONObject perfObj = new JSONObject(hookContext.getPerfLogger().getEndTimes());
+ JSONObject perfObj = new JSONObject();
+ for (String key : hookContext.getPerfLogger().getEndTimes().keySet()) {
+ perfObj.put(key, hookContext.getPerfLogger().getDuration(key));
+ }
addMapEntry(builder, OtherInfoType.PERF, perfObj.toString());
return builder.build();
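A minimal sketch (not part of the patch) of what the PERF payload now carries: each finished PerfLogger phase is mapped to its duration via getDuration rather than to its raw end timestamp, so consumers of the proto log see elapsed times. The helper name buildPerfJson and the import paths are illustrative assumptions; the real change lives inside HiveProtoLoggingHook as shown in the hunk above.

    // Illustrative sketch only; assumes the org.json JSONObject the hook already
    // uses and the PerfLogger package location in this branch.
    import org.apache.hadoop.hive.ql.log.PerfLogger;
    import org.json.JSONObject;

    public final class PerfJsonSketch {
      private PerfJsonSketch() {}

      static JSONObject buildPerfJson(PerfLogger perfLogger) {
        JSONObject perfObj = new JSONObject();
        // getEndTimes() lists every phase that has finished; getDuration(key)
        // returns the elapsed time for that phase, which is what gets logged.
        for (String key : perfLogger.getEndTimes().keySet()) {
          perfObj.put(key, perfLogger.getDuration(key));
        }
        return perfObj;
      }
    }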
http://git-wip-us.apache.org/repos/asf/hive/blob/c4eb647c/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
index 98b73e8..96fb73c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveProtoLoggingHook.java
@@ -22,6 +22,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
+import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -46,6 +47,9 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
public class TestHiveProtoLoggingHook {
@@ -106,6 +110,8 @@ public class TestHiveProtoLoggingHook {
@Test
public void testPostEventLog() throws Exception {
context.setHookType(HookType.POST_EXEC_HOOK);
+ context.getPerfLogger().PerfLogBegin("test", "LogTest");
+ context.getPerfLogger().PerfLogEnd("test", "LogTest");
EventLogger evtLogger = new EventLogger(conf, SystemClock.getInstance());
evtLogger.handle(context);
@@ -119,7 +125,11 @@ public class TestHiveProtoLoggingHook {
Assert.assertEquals("test_op_id", event.getOperationId());
assertOtherInfo(event, OtherInfoType.STATUS, Boolean.TRUE.toString());
- assertOtherInfo(event, OtherInfoType.PERF, null);
+ String val = findOtherInfo(event, OtherInfoType.PERF);
+ Map<String, Long> map = new ObjectMapper().readValue(val,
+ new TypeReference<Map<String, Long>>() {});
+ // This should be really close to zero.
+ Assert.assertTrue("Expected LogTest in PERF", map.get("LogTest") < 100);
}
@Test
@@ -158,15 +168,20 @@ public class TestHiveProtoLoggingHook {
return event;
}
- private void assertOtherInfo(HiveHookEventProto event, OtherInfoType key, String value) {
+ private String findOtherInfo(HiveHookEventProto event, OtherInfoType key) {
for (MapFieldEntry otherInfo : event.getOtherInfoList()) {
if (otherInfo.getKey().equals(key.name())) {
- if (value != null) {
- Assert.assertEquals(value, otherInfo.getValue());
- }
- return;
+ return otherInfo.getValue();
}
}
- Assert.fail("Cannot find key: " + key);
+ Assert.fail("Cannot find key " + key);
+ return null;
+ }
+
+ private void assertOtherInfo(HiveHookEventProto event, OtherInfoType key, String value) {
+ String val = findOtherInfo(event, key);
+ if (value != null) {
+ Assert.assertEquals(value, val);
+ }
}
}
[42/67] [abbrv] hive git commit: HIVE-19909: qtests: retire hadoop_major version specific tests; and logics (Zoltan Haindrich reviewed by Teddy Choi)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
index a5f5522..f933545 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
set mapred.input.dir.recursive=true;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- List bucketing query logic test case. We simulate the directory structure by DML here.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
index 4020063..d5f6a26 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
set mapred.input.dir.recursive=true;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- List bucketing query logic test case. We simulate the directory structure by DML here.
-- Test condition:
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
index 54ab75e..fc5815c 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
set mapred.input.dir.recursive=true;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- List bucketing query logic test case.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
index 77974cf..bc4f96c 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
set mapred.input.dir.recursive=true;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- List bucketing query logic test case.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
index bf6b227..64193f1 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
@@ -4,7 +4,6 @@ set hive.optimize.listbucketing=true;
set mapred.input.dir.recursive=true;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- List bucketing query logic test case.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/loadpart_err.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/loadpart_err.q b/ql/src/test/queries/clientpositive/loadpart_err.q
deleted file mode 100644
index 1204622..0000000
--- a/ql/src/test/queries/clientpositive/loadpart_err.q
+++ /dev/null
@@ -1,21 +0,0 @@
---! qt:dataset:src
-set hive.cli.errors.ignore=true;
-
-ADD FILE ../../data/scripts/error_script;
-
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S, 0.23)
--- (this test is flaky so it is currently disabled for all Hadoop versions)
-
-CREATE TABLE loadpart1(a STRING, b STRING) PARTITIONED BY (ds STRING);
-
-INSERT OVERWRITE TABLE loadpart1 PARTITION (ds='2009-01-01')
-SELECT TRANSFORM(src.key, src.value) USING 'error_script' AS (tkey, tvalue)
-FROM src;
-
-DESCRIBE loadpart1;
-SHOW PARTITIONS loadpart1;
-
-LOAD DATA LOCAL INPATH '../../data1/files/kv1.txt' INTO TABLE loadpart1 PARTITION(ds='2009-05-05');
-SHOW PARTITIONS loadpart1;
-
-
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/recursive_dir.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/recursive_dir.q b/ql/src/test/queries/clientpositive/recursive_dir.q
index 2b25f60..33b2186 100644
--- a/ql/src/test/queries/clientpositive/recursive_dir.q
+++ b/ql/src/test/queries/clientpositive/recursive_dir.q
@@ -1,5 +1,4 @@
--! qt:dataset:src
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
CREATE TABLE fact_daily_n1(x int) PARTITIONED BY (ds STRING);
CREATE TABLE fact_tz_n0(x int) PARTITIONED BY (ds STRING, hr STRING)
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/sample10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample10.q b/ql/src/test/queries/clientpositive/sample10.q
index b0aab14..e566a17 100644
--- a/ql/src/test/queries/clientpositive/sample10.q
+++ b/ql/src/test/queries/clientpositive/sample10.q
@@ -10,7 +10,6 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
set hive.default.fileformat=RCFILE;
set hive.exec.pre.hooks = org.apache.hadoop.hive.ql.hooks.PreExecutePrinter,org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyTables,org.apache.hadoop.hive.ql.hooks.UpdateInputAccessTimeHook$PreExec;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19)
create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q b/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
index 1518529..8eb4a86 100644
--- a/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
+++ b/ql/src/test/queries/clientpositive/sample_islocalmode_hook.q
@@ -10,7 +10,6 @@ set mapred.min.split.size.per.rack=300;
set hive.exec.mode.local.auto=true;
set hive.merge.smallfiles.avgsize=1;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
-- create file inputs
create table sih_i_part (key int, value string) partitioned by (p string);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
deleted file mode 100644
index 803ca91..0000000
--- a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
+++ /dev/null
@@ -1,42 +0,0 @@
---! qt:dataset:src
-USE default;
-
-set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-set mapred.max.split.size=300;
-set mapred.min.split.size=300;
-set mapred.min.split.size.per.node=300;
-set mapred.min.split.size.per.rack=300;
-set hive.exec.mode.local.auto=true;
-set hive.merge.smallfiles.avgsize=1;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
--- This test sets mapred.max.split.size=300 and hive.merge.smallfiles.avgsize=1
--- in an attempt to force the generation of multiple splits and multiple output files.
--- However, Hadoop 0.20 is incapable of generating splits smaller than the block size
--- when using CombineFileInputFormat, so only one split is generated. This has a
--- significant impact on the results of the TABLESAMPLE(x PERCENT). This issue was
--- fixed in MAPREDUCE-2046 which is included in 0.22.
-
--- create file inputs
-create table sih_i_part_n0 (key int, value string) partitioned by (p string);
-insert overwrite table sih_i_part_n0 partition (p='1') select key, value from src;
-insert overwrite table sih_i_part_n0 partition (p='2') select key+10000, value from src;
-insert overwrite table sih_i_part_n0 partition (p='3') select key+20000, value from src;
-create table sih_src_n0 as select key, value from sih_i_part_n0 order by key, value;
-create table sih_src2_n0 as select key, value from sih_src_n0 order by key, value;
-
-set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.VerifyIsLocalModeHook ;
-set mapred.job.tracker=localhost:58;
-set hive.exec.mode.local.auto.input.files.max=1;
-
--- Sample split, running locally limited by num tasks
-select count(1) from sih_src_n0 tablesample(1 percent);
-
--- sample two tables
-select count(1) from sih_src_n0 tablesample(1 percent)a join sih_src2_n0 tablesample(1 percent)b on a.key = b.key;
-
-set hive.exec.mode.local.auto.inputbytes.max=1000;
-set hive.exec.mode.local.auto.input.files.max=4;
-
--- sample split, running locally limited by max bytes
-select count(1) from sih_src_n0 tablesample(1 percent);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
index 1675263..a08d0c5 100644
--- a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
+++ b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
@@ -11,7 +11,6 @@ set hive.exec.mode.local.auto=true;
set hive.merge.smallfiles.avgsize=1;
set hive.compute.query.using.stats=true;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
-- create file inputs
create table sih_i_part_n1 (key int, value string) partitioned by (p string);
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q b/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
index 9d5571b..2db13f0 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
@@ -13,7 +13,6 @@ set mapred.input.dir.recursive=true;
-- Union of 2 map-reduce subqueries is performed for the skew join
-- There is no need to write the temporary results of the sub-queries, and then read them
-- again to process the union. The union can be removed completely.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output, it might be easier to run the test
-- only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q b/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
index 06ebfdc..2a41e3a 100644
--- a/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
+++ b/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
@@ -27,7 +27,6 @@ LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n2;
-- Union of 3 map-reduce subqueries is performed for the skew join
-- There is no need to write the temporary results of the sub-queries, and then read them
-- again to process the union. The union can be removed completely.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/stats_list_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_list_bucket.q b/ql/src/test/queries/clientpositive/stats_list_bucket.q
index c4339d0..bbb4206 100644
--- a/ql/src/test/queries/clientpositive/stats_list_bucket.q
+++ b/ql/src/test/queries/clientpositive/stats_list_bucket.q
@@ -1,6 +1,5 @@
--! qt:dataset:src
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
drop table stats_list_bucket;
drop table stats_list_bucket_1;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q b/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q
index f7498aa..eb08af1 100644
--- a/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q
+++ b/ql/src/test/queries/clientpositive/truncate_column_list_bucket.q
@@ -8,7 +8,6 @@ set mapred.input.dir.recursive=true;
-- Tests truncating a column from a list bucketing table
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
CREATE TABLE test_tab_n3 (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/uber_reduce.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/uber_reduce.q b/ql/src/test/queries/clientpositive/uber_reduce.q
index c6bbf60..a8b1e36 100644
--- a/ql/src/test/queries/clientpositive/uber_reduce.q
+++ b/ql/src/test/queries/clientpositive/uber_reduce.q
@@ -3,7 +3,6 @@ SET mapreduce.job.ubertask.maxreduces=1;
SET mapred.reduce.tasks=1;
-- Uberized mode is a YARN option, ignore this test for non-YARN Hadoop versions
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
CREATE TABLE T1_n136(key STRING, val STRING);
LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n136;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q b/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
deleted file mode 100644
index 8fca1c9..0000000
--- a/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
+++ /dev/null
@@ -1,87 +0,0 @@
-set hive.strict.checks.bucketing=false;
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
-
-CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000000_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000001_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000002_0' INTO TABLE bucket;
-load data local inpath '../../data/files/auto_sortmerge_join/big/000003_0' INTO TABLE bucket;
-
-create table t1_n10 (result double);
-create table t2_n6 (result double);
-create table t3_n3 (result double);
-create table t4_n0 (result double);
-create table t5 (result double);
-create table t6 (result double);
-create table t7_n0 (result array<double>);
-create table t8 (result array<double>);
-create table t9 (result array<double>);
-create table t10 (result array<double>);
-create table t11 (result array<double>);
-create table t12 (result array<double>);
-
-set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.map.aggr=false;
--- disable map-side aggregation
-FROM bucket
-insert overwrite table t1_n10 SELECT percentile_approx(cast(key AS double), 0.5)
-insert overwrite table t2_n6 SELECT percentile_approx(cast(key AS double), 0.5, 100)
-insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
-
-insert overwrite table t4_n0 SELECT percentile_approx(cast(key AS int), 0.5)
-insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
-insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
-
-insert overwrite table t7_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
-insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
-
-insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
-insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);
-
-select * from t1_n10;
-select * from t2_n6;
-select * from t3_n3;
-select * from t4_n0;
-select * from t5;
-select * from t6;
-select * from t7_n0;
-select * from t8;
-select * from t9;
-select * from t10;
-select * from t11;
-select * from t12;
-
-set hive.map.aggr=true;
--- enable map-side aggregation
-FROM bucket
-insert overwrite table t1_n10 SELECT percentile_approx(cast(key AS double), 0.5)
-insert overwrite table t2_n6 SELECT percentile_approx(cast(key AS double), 0.5, 100)
-insert overwrite table t3_n3 SELECT percentile_approx(cast(key AS double), 0.5, 1000)
-
-insert overwrite table t4_n0 SELECT percentile_approx(cast(key AS int), 0.5)
-insert overwrite table t5 SELECT percentile_approx(cast(key AS int), 0.5, 100)
-insert overwrite table t6 SELECT percentile_approx(cast(key AS int), 0.5, 1000)
-
-insert overwrite table t7_n0 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98))
-insert overwrite table t8 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t9 SELECT percentile_approx(cast(key AS double), array(0.05,0.5,0.95,0.98), 1000)
-
-insert overwrite table t10 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98))
-insert overwrite table t11 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 100)
-insert overwrite table t12 SELECT percentile_approx(cast(key AS int), array(0.05,0.5,0.95,0.98), 1000);
-
-select * from t1_n10;
-select * from t2_n6;
-select * from t3_n3;
-select * from t4_n0;
-select * from t5;
-select * from t6;
-select * from t7_n0;
-select * from t8;
-select * from t9;
-select * from t10;
-select * from t11;
-select * from t12;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q b/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
index db1fc88..bce38ef 100644
--- a/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
+++ b/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
@@ -1,7 +1,6 @@
set hive.strict.checks.bucketing=false;
set hive.mapred.mode=nonstrict;
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- 0.23 changed input order of data in reducer task, which affects result of percentile_approx
CREATE TABLE bucket_n0 (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE;
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_1.q b/ql/src/test/queries/clientpositive/union_remove_1.q
index 0a69068..7276804 100644
--- a/ql/src/test/queries/clientpositive/union_remove_1.q
+++ b/ql/src/test/queries/clientpositive/union_remove_1.q
@@ -14,7 +14,6 @@ set mapred.input.dir.recursive=true;
-- again to process the union. The union can be removed completely.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_10.q b/ql/src/test/queries/clientpositive/union_remove_10.q
index 71a0892..dfd5d0a 100644
--- a/ql/src/test/queries/clientpositive/union_remove_10.q
+++ b/ql/src/test/queries/clientpositive/union_remove_10.q
@@ -19,7 +19,6 @@ set mapred.input.dir.recursive=true;
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- on
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n9, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_11.q b/ql/src/test/queries/clientpositive/union_remove_11.q
index bfd734d..6017336 100644
--- a/ql/src/test/queries/clientpositive/union_remove_11.q
+++ b/ql/src/test/queries/clientpositive/union_remove_11.q
@@ -19,7 +19,6 @@ set mapred.input.dir.recursive=true;
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- on
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n21, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_12.q b/ql/src/test/queries/clientpositive/union_remove_12.q
index cdddc71..392c297 100644
--- a/ql/src/test/queries/clientpositive/union_remove_12.q
+++ b/ql/src/test/queries/clientpositive/union_remove_12.q
@@ -15,7 +15,6 @@ set mapred.input.dir.recursive=true;
-- other one is a map-join query), followed by select star and a file sink.
-- The union optimization is applied, and the union is removed.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n29, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_13.q b/ql/src/test/queries/clientpositive/union_remove_13.q
index dcf14e0..6f337ad 100644
--- a/ql/src/test/queries/clientpositive/union_remove_13.q
+++ b/ql/src/test/queries/clientpositive/union_remove_13.q
@@ -15,7 +15,6 @@ set mapred.input.dir.recursive=true;
-- other one is a map-join query), followed by select star and a file sink.
-- The union selectstar optimization should be performed, and the union should be removed.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n3, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_14.q b/ql/src/test/queries/clientpositive/union_remove_14.q
index 04e2998..d98dbe5 100644
--- a/ql/src/test/queries/clientpositive/union_remove_14.q
+++ b/ql/src/test/queries/clientpositive/union_remove_14.q
@@ -16,7 +16,6 @@ set mapred.input.dir.recursive=true;
-- followed by select star and a file sink.
-- The union selectstar optimization should be performed, and the union should be removed.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n16, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_15.q b/ql/src/test/queries/clientpositive/union_remove_15.q
index 096d330..9c0f3a3 100644
--- a/ql/src/test/queries/clientpositive/union_remove_15.q
+++ b/ql/src/test/queries/clientpositive/union_remove_15.q
@@ -20,7 +20,6 @@ set mapred.input.dir.recursive=true;
-- off
-- This test demonstrates that this optimization works in the presence of dynamic partitions.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n25, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_16.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_16.q b/ql/src/test/queries/clientpositive/union_remove_16.q
index 053528e..ec24cf0 100644
--- a/ql/src/test/queries/clientpositive/union_remove_16.q
+++ b/ql/src/test/queries/clientpositive/union_remove_16.q
@@ -20,7 +20,6 @@ set hive.exec.dynamic.partition=true;
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- on
-- This test demonstrates that this optimization works in the presence of dynamic partitions.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n32, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_17.q b/ql/src/test/queries/clientpositive/union_remove_17.q
index eb9a093..92fa2e9 100644
--- a/ql/src/test/queries/clientpositive/union_remove_17.q
+++ b/ql/src/test/queries/clientpositive/union_remove_17.q
@@ -17,7 +17,6 @@ set mapred.input.dir.recursive=true;
-- There is no need for this optimization, since the query is a map-only query.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n4, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_18.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_18.q b/ql/src/test/queries/clientpositive/union_remove_18.q
index 1c5e921..8259198 100644
--- a/ql/src/test/queries/clientpositive/union_remove_18.q
+++ b/ql/src/test/queries/clientpositive/union_remove_18.q
@@ -19,7 +19,6 @@ set mapred.input.dir.recursive=true;
-- off
-- This test demonstrates that the optimization works with dynamic partitions irrespective of the
-- file format of the output file
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n30, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_19.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_19.q b/ql/src/test/queries/clientpositive/union_remove_19.q
index 75285ee..675f01b 100644
--- a/ql/src/test/queries/clientpositive/union_remove_19.q
+++ b/ql/src/test/queries/clientpositive/union_remove_19.q
@@ -14,7 +14,6 @@ set mapred.input.dir.recursive=true;
-- again to process the union. The union can be removed completely.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n1, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_2.q b/ql/src/test/queries/clientpositive/union_remove_2.q
index c0e395f..e5fe0bf 100644
--- a/ql/src/test/queries/clientpositive/union_remove_2.q
+++ b/ql/src/test/queries/clientpositive/union_remove_2.q
@@ -15,7 +15,6 @@ set mapred.input.dir.recursive=true;
-- again to process the union. The union can be removed completely.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n11, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_20.q b/ql/src/test/queries/clientpositive/union_remove_20.q
index 5343f58..79a9059 100644
--- a/ql/src/test/queries/clientpositive/union_remove_20.q
+++ b/ql/src/test/queries/clientpositive/union_remove_20.q
@@ -14,7 +14,6 @@ set mapred.input.dir.recursive=true;
-- be removed.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n27, it might be easier
-- to run the test only on hadoop 23. The union is removed, the select (which changes the order of
-- columns being selected) is pushed above the union.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_21.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_21.q b/ql/src/test/queries/clientpositive/union_remove_21.q
index d51de64..088a80e 100644
--- a/ql/src/test/queries/clientpositive/union_remove_21.q
+++ b/ql/src/test/queries/clientpositive/union_remove_21.q
@@ -14,7 +14,6 @@ set mapred.input.dir.recursive=true;
-- be removed.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n17, it might be easier
-- to run the test only on hadoop 23. The union is removed, the select (which changes the order of
-- columns being selected) is pushed above the union.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_22.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_22.q b/ql/src/test/queries/clientpositive/union_remove_22.q
index 134e650..ef3ba51 100644
--- a/ql/src/test/queries/clientpositive/union_remove_22.q
+++ b/ql/src/test/queries/clientpositive/union_remove_22.q
@@ -14,7 +14,6 @@ set mapred.input.dir.recursive=true;
-- However, some columns are repeated. So, union cannot be removed.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n7, it might be easier
-- to run the test only on hadoop 23. The union is removed, the select (which selects columns from
-- both the sub-queries of the union) is pushed above the union.
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_23.q b/ql/src/test/queries/clientpositive/union_remove_23.q
index 8ac2093..3145ac0 100644
--- a/ql/src/test/queries/clientpositive/union_remove_23.q
+++ b/ql/src/test/queries/clientpositive/union_remove_23.q
@@ -15,7 +15,6 @@ set mapred.input.dir.recursive=true;
-- would have multiple map-reduce jobs.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n34, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_24.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_24.q b/ql/src/test/queries/clientpositive/union_remove_24.q
index ea3c12b..6c34f56 100644
--- a/ql/src/test/queries/clientpositive/union_remove_24.q
+++ b/ql/src/test/queries/clientpositive/union_remove_24.q
@@ -13,7 +13,6 @@ set mapred.input.dir.recursive=true;
-- There is no need to write the temporary results of the sub-queries, and then read them
-- again to process the union. The union can be removed completely.
-- One sub-query has a double and the other sub-query has a bigint.
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n28, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_25.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_25.q b/ql/src/test/queries/clientpositive/union_remove_25.q
index e6d1b0d..b186c2c 100644
--- a/ql/src/test/queries/clientpositive/union_remove_25.q
+++ b/ql/src/test/queries/clientpositive/union_remove_25.q
@@ -16,7 +16,6 @@ set mapred.input.dir.recursive=true;
-- again to process the union. The union can be removed completely.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n19, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_3.q b/ql/src/test/queries/clientpositive/union_remove_3.q
index f2c8541..490be99 100644
--- a/ql/src/test/queries/clientpositive/union_remove_3.q
+++ b/ql/src/test/queries/clientpositive/union_remove_3.q
@@ -15,7 +15,6 @@ set mapred.input.dir.recursive=true;
-- a single map-only job
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n23, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_4.q b/ql/src/test/queries/clientpositive/union_remove_4.q
index 0b2b818..adc45e7 100644
--- a/ql/src/test/queries/clientpositive/union_remove_4.q
+++ b/ql/src/test/queries/clientpositive/union_remove_4.q
@@ -15,7 +15,6 @@ set hive.merge.smallfiles.avgsize=1;
-- again to process the union. The union can be removed completely.
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- on
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n33, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_5.q b/ql/src/test/queries/clientpositive/union_remove_5.q
index 7c78714..05f7c32 100644
--- a/ql/src/test/queries/clientpositive/union_remove_5.q
+++ b/ql/src/test/queries/clientpositive/union_remove_5.q
@@ -17,7 +17,6 @@ set mapred.input.dir.recursive=true;
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- on
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n6, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_7.q b/ql/src/test/queries/clientpositive/union_remove_7.q
index 43a5fe1..caca645 100644
--- a/ql/src/test/queries/clientpositive/union_remove_7.q
+++ b/ql/src/test/queries/clientpositive/union_remove_7.q
@@ -16,7 +16,6 @@ set mapred.input.dir.recursive=true;
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n24, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_8.q b/ql/src/test/queries/clientpositive/union_remove_8.q
index 05a5671..397460e 100644
--- a/ql/src/test/queries/clientpositive/union_remove_8.q
+++ b/ql/src/test/queries/clientpositive/union_remove_8.q
@@ -17,7 +17,6 @@ set mapred.input.dir.recursive=true;
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- off
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n12, it might be easier
-- to run the test only on hadoop 23
http://git-wip-us.apache.org/repos/asf/hive/blob/4ec256c2/ql/src/test/queries/clientpositive/union_remove_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_remove_9.q b/ql/src/test/queries/clientpositive/union_remove_9.q
index 475e52c..3b24b95 100644
--- a/ql/src/test/queries/clientpositive/union_remove_9.q
+++ b/ql/src/test/queries/clientpositive/union_remove_9.q
@@ -17,7 +17,6 @@ set mapred.input.dir.recursive=true;
-- It does not matter, whether the output is merged or not. In this case, merging is turned
-- on
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- Since this test creates sub-directories for the output table outputTbl1_n22, it might be easier
-- to run the test only on hadoop 23
[64/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index e459bc2..7a81dfb 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -2334,14 +2334,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1202;
- ::apache::thrift::protocol::TType _etype1205;
- xfer += iprot->readListBegin(_etype1205, _size1202);
- this->success.resize(_size1202);
- uint32_t _i1206;
- for (_i1206 = 0; _i1206 < _size1202; ++_i1206)
+ uint32_t _size1221;
+ ::apache::thrift::protocol::TType _etype1224;
+ xfer += iprot->readListBegin(_etype1224, _size1221);
+ this->success.resize(_size1221);
+ uint32_t _i1225;
+ for (_i1225 = 0; _i1225 < _size1221; ++_i1225)
{
- xfer += iprot->readString(this->success[_i1206]);
+ xfer += iprot->readString(this->success[_i1225]);
}
xfer += iprot->readListEnd();
}
@@ -2380,10 +2380,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1207;
- for (_iter1207 = this->success.begin(); _iter1207 != this->success.end(); ++_iter1207)
+ std::vector<std::string> ::const_iterator _iter1226;
+ for (_iter1226 = this->success.begin(); _iter1226 != this->success.end(); ++_iter1226)
{
- xfer += oprot->writeString((*_iter1207));
+ xfer += oprot->writeString((*_iter1226));
}
xfer += oprot->writeListEnd();
}
@@ -2428,14 +2428,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1208;
- ::apache::thrift::protocol::TType _etype1211;
- xfer += iprot->readListBegin(_etype1211, _size1208);
- (*(this->success)).resize(_size1208);
- uint32_t _i1212;
- for (_i1212 = 0; _i1212 < _size1208; ++_i1212)
+ uint32_t _size1227;
+ ::apache::thrift::protocol::TType _etype1230;
+ xfer += iprot->readListBegin(_etype1230, _size1227);
+ (*(this->success)).resize(_size1227);
+ uint32_t _i1231;
+ for (_i1231 = 0; _i1231 < _size1227; ++_i1231)
{
- xfer += iprot->readString((*(this->success))[_i1212]);
+ xfer += iprot->readString((*(this->success))[_i1231]);
}
xfer += iprot->readListEnd();
}
@@ -2552,14 +2552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1213;
- ::apache::thrift::protocol::TType _etype1216;
- xfer += iprot->readListBegin(_etype1216, _size1213);
- this->success.resize(_size1213);
- uint32_t _i1217;
- for (_i1217 = 0; _i1217 < _size1213; ++_i1217)
+ uint32_t _size1232;
+ ::apache::thrift::protocol::TType _etype1235;
+ xfer += iprot->readListBegin(_etype1235, _size1232);
+ this->success.resize(_size1232);
+ uint32_t _i1236;
+ for (_i1236 = 0; _i1236 < _size1232; ++_i1236)
{
- xfer += iprot->readString(this->success[_i1217]);
+ xfer += iprot->readString(this->success[_i1236]);
}
xfer += iprot->readListEnd();
}
@@ -2598,10 +2598,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1218;
- for (_iter1218 = this->success.begin(); _iter1218 != this->success.end(); ++_iter1218)
+ std::vector<std::string> ::const_iterator _iter1237;
+ for (_iter1237 = this->success.begin(); _iter1237 != this->success.end(); ++_iter1237)
{
- xfer += oprot->writeString((*_iter1218));
+ xfer += oprot->writeString((*_iter1237));
}
xfer += oprot->writeListEnd();
}
@@ -2646,14 +2646,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1219;
- ::apache::thrift::protocol::TType _etype1222;
- xfer += iprot->readListBegin(_etype1222, _size1219);
- (*(this->success)).resize(_size1219);
- uint32_t _i1223;
- for (_i1223 = 0; _i1223 < _size1219; ++_i1223)
+ uint32_t _size1238;
+ ::apache::thrift::protocol::TType _etype1241;
+ xfer += iprot->readListBegin(_etype1241, _size1238);
+ (*(this->success)).resize(_size1238);
+ uint32_t _i1242;
+ for (_i1242 = 0; _i1242 < _size1238; ++_i1242)
{
- xfer += iprot->readString((*(this->success))[_i1223]);
+ xfer += iprot->readString((*(this->success))[_i1242]);
}
xfer += iprot->readListEnd();
}
@@ -3715,17 +3715,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size1224;
- ::apache::thrift::protocol::TType _ktype1225;
- ::apache::thrift::protocol::TType _vtype1226;
- xfer += iprot->readMapBegin(_ktype1225, _vtype1226, _size1224);
- uint32_t _i1228;
- for (_i1228 = 0; _i1228 < _size1224; ++_i1228)
+ uint32_t _size1243;
+ ::apache::thrift::protocol::TType _ktype1244;
+ ::apache::thrift::protocol::TType _vtype1245;
+ xfer += iprot->readMapBegin(_ktype1244, _vtype1245, _size1243);
+ uint32_t _i1247;
+ for (_i1247 = 0; _i1247 < _size1243; ++_i1247)
{
- std::string _key1229;
- xfer += iprot->readString(_key1229);
- Type& _val1230 = this->success[_key1229];
- xfer += _val1230.read(iprot);
+ std::string _key1248;
+ xfer += iprot->readString(_key1248);
+ Type& _val1249 = this->success[_key1248];
+ xfer += _val1249.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -3764,11 +3764,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Type> ::const_iterator _iter1231;
- for (_iter1231 = this->success.begin(); _iter1231 != this->success.end(); ++_iter1231)
+ std::map<std::string, Type> ::const_iterator _iter1250;
+ for (_iter1250 = this->success.begin(); _iter1250 != this->success.end(); ++_iter1250)
{
- xfer += oprot->writeString(_iter1231->first);
- xfer += _iter1231->second.write(oprot);
+ xfer += oprot->writeString(_iter1250->first);
+ xfer += _iter1250->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
@@ -3813,17 +3813,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size1232;
- ::apache::thrift::protocol::TType _ktype1233;
- ::apache::thrift::protocol::TType _vtype1234;
- xfer += iprot->readMapBegin(_ktype1233, _vtype1234, _size1232);
- uint32_t _i1236;
- for (_i1236 = 0; _i1236 < _size1232; ++_i1236)
+ uint32_t _size1251;
+ ::apache::thrift::protocol::TType _ktype1252;
+ ::apache::thrift::protocol::TType _vtype1253;
+ xfer += iprot->readMapBegin(_ktype1252, _vtype1253, _size1251);
+ uint32_t _i1255;
+ for (_i1255 = 0; _i1255 < _size1251; ++_i1255)
{
- std::string _key1237;
- xfer += iprot->readString(_key1237);
- Type& _val1238 = (*(this->success))[_key1237];
- xfer += _val1238.read(iprot);
+ std::string _key1256;
+ xfer += iprot->readString(_key1256);
+ Type& _val1257 = (*(this->success))[_key1256];
+ xfer += _val1257.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -3977,14 +3977,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1239;
- ::apache::thrift::protocol::TType _etype1242;
- xfer += iprot->readListBegin(_etype1242, _size1239);
- this->success.resize(_size1239);
- uint32_t _i1243;
- for (_i1243 = 0; _i1243 < _size1239; ++_i1243)
+ uint32_t _size1258;
+ ::apache::thrift::protocol::TType _etype1261;
+ xfer += iprot->readListBegin(_etype1261, _size1258);
+ this->success.resize(_size1258);
+ uint32_t _i1262;
+ for (_i1262 = 0; _i1262 < _size1258; ++_i1262)
{
- xfer += this->success[_i1243].read(iprot);
+ xfer += this->success[_i1262].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4039,10 +4039,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1244;
- for (_iter1244 = this->success.begin(); _iter1244 != this->success.end(); ++_iter1244)
+ std::vector<FieldSchema> ::const_iterator _iter1263;
+ for (_iter1263 = this->success.begin(); _iter1263 != this->success.end(); ++_iter1263)
{
- xfer += (*_iter1244).write(oprot);
+ xfer += (*_iter1263).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4095,14 +4095,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1245;
- ::apache::thrift::protocol::TType _etype1248;
- xfer += iprot->readListBegin(_etype1248, _size1245);
- (*(this->success)).resize(_size1245);
- uint32_t _i1249;
- for (_i1249 = 0; _i1249 < _size1245; ++_i1249)
+ uint32_t _size1264;
+ ::apache::thrift::protocol::TType _etype1267;
+ xfer += iprot->readListBegin(_etype1267, _size1264);
+ (*(this->success)).resize(_size1264);
+ uint32_t _i1268;
+ for (_i1268 = 0; _i1268 < _size1264; ++_i1268)
{
- xfer += (*(this->success))[_i1249].read(iprot);
+ xfer += (*(this->success))[_i1268].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4288,14 +4288,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1250;
- ::apache::thrift::protocol::TType _etype1253;
- xfer += iprot->readListBegin(_etype1253, _size1250);
- this->success.resize(_size1250);
- uint32_t _i1254;
- for (_i1254 = 0; _i1254 < _size1250; ++_i1254)
+ uint32_t _size1269;
+ ::apache::thrift::protocol::TType _etype1272;
+ xfer += iprot->readListBegin(_etype1272, _size1269);
+ this->success.resize(_size1269);
+ uint32_t _i1273;
+ for (_i1273 = 0; _i1273 < _size1269; ++_i1273)
{
- xfer += this->success[_i1254].read(iprot);
+ xfer += this->success[_i1273].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4350,10 +4350,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1255;
- for (_iter1255 = this->success.begin(); _iter1255 != this->success.end(); ++_iter1255)
+ std::vector<FieldSchema> ::const_iterator _iter1274;
+ for (_iter1274 = this->success.begin(); _iter1274 != this->success.end(); ++_iter1274)
{
- xfer += (*_iter1255).write(oprot);
+ xfer += (*_iter1274).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4406,14 +4406,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1256;
- ::apache::thrift::protocol::TType _etype1259;
- xfer += iprot->readListBegin(_etype1259, _size1256);
- (*(this->success)).resize(_size1256);
- uint32_t _i1260;
- for (_i1260 = 0; _i1260 < _size1256; ++_i1260)
+ uint32_t _size1275;
+ ::apache::thrift::protocol::TType _etype1278;
+ xfer += iprot->readListBegin(_etype1278, _size1275);
+ (*(this->success)).resize(_size1275);
+ uint32_t _i1279;
+ for (_i1279 = 0; _i1279 < _size1275; ++_i1279)
{
- xfer += (*(this->success))[_i1260].read(iprot);
+ xfer += (*(this->success))[_i1279].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4583,14 +4583,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1261;
- ::apache::thrift::protocol::TType _etype1264;
- xfer += iprot->readListBegin(_etype1264, _size1261);
- this->success.resize(_size1261);
- uint32_t _i1265;
- for (_i1265 = 0; _i1265 < _size1261; ++_i1265)
+ uint32_t _size1280;
+ ::apache::thrift::protocol::TType _etype1283;
+ xfer += iprot->readListBegin(_etype1283, _size1280);
+ this->success.resize(_size1280);
+ uint32_t _i1284;
+ for (_i1284 = 0; _i1284 < _size1280; ++_i1284)
{
- xfer += this->success[_i1265].read(iprot);
+ xfer += this->success[_i1284].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4645,10 +4645,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1266;
- for (_iter1266 = this->success.begin(); _iter1266 != this->success.end(); ++_iter1266)
+ std::vector<FieldSchema> ::const_iterator _iter1285;
+ for (_iter1285 = this->success.begin(); _iter1285 != this->success.end(); ++_iter1285)
{
- xfer += (*_iter1266).write(oprot);
+ xfer += (*_iter1285).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -4701,14 +4701,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1267;
- ::apache::thrift::protocol::TType _etype1270;
- xfer += iprot->readListBegin(_etype1270, _size1267);
- (*(this->success)).resize(_size1267);
- uint32_t _i1271;
- for (_i1271 = 0; _i1271 < _size1267; ++_i1271)
+ uint32_t _size1286;
+ ::apache::thrift::protocol::TType _etype1289;
+ xfer += iprot->readListBegin(_etype1289, _size1286);
+ (*(this->success)).resize(_size1286);
+ uint32_t _i1290;
+ for (_i1290 = 0; _i1290 < _size1286; ++_i1290)
{
- xfer += (*(this->success))[_i1271].read(iprot);
+ xfer += (*(this->success))[_i1290].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4894,14 +4894,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1272;
- ::apache::thrift::protocol::TType _etype1275;
- xfer += iprot->readListBegin(_etype1275, _size1272);
- this->success.resize(_size1272);
- uint32_t _i1276;
- for (_i1276 = 0; _i1276 < _size1272; ++_i1276)
+ uint32_t _size1291;
+ ::apache::thrift::protocol::TType _etype1294;
+ xfer += iprot->readListBegin(_etype1294, _size1291);
+ this->success.resize(_size1291);
+ uint32_t _i1295;
+ for (_i1295 = 0; _i1295 < _size1291; ++_i1295)
{
- xfer += this->success[_i1276].read(iprot);
+ xfer += this->success[_i1295].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -4956,10 +4956,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<FieldSchema> ::const_iterator _iter1277;
- for (_iter1277 = this->success.begin(); _iter1277 != this->success.end(); ++_iter1277)
+ std::vector<FieldSchema> ::const_iterator _iter1296;
+ for (_iter1296 = this->success.begin(); _iter1296 != this->success.end(); ++_iter1296)
{
- xfer += (*_iter1277).write(oprot);
+ xfer += (*_iter1296).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5012,14 +5012,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1278;
- ::apache::thrift::protocol::TType _etype1281;
- xfer += iprot->readListBegin(_etype1281, _size1278);
- (*(this->success)).resize(_size1278);
- uint32_t _i1282;
- for (_i1282 = 0; _i1282 < _size1278; ++_i1282)
+ uint32_t _size1297;
+ ::apache::thrift::protocol::TType _etype1300;
+ xfer += iprot->readListBegin(_etype1300, _size1297);
+ (*(this->success)).resize(_size1297);
+ uint32_t _i1301;
+ for (_i1301 = 0; _i1301 < _size1297; ++_i1301)
{
- xfer += (*(this->success))[_i1282].read(iprot);
+ xfer += (*(this->success))[_i1301].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5612,14 +5612,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->primaryKeys.clear();
- uint32_t _size1283;
- ::apache::thrift::protocol::TType _etype1286;
- xfer += iprot->readListBegin(_etype1286, _size1283);
- this->primaryKeys.resize(_size1283);
- uint32_t _i1287;
- for (_i1287 = 0; _i1287 < _size1283; ++_i1287)
+ uint32_t _size1302;
+ ::apache::thrift::protocol::TType _etype1305;
+ xfer += iprot->readListBegin(_etype1305, _size1302);
+ this->primaryKeys.resize(_size1302);
+ uint32_t _i1306;
+ for (_i1306 = 0; _i1306 < _size1302; ++_i1306)
{
- xfer += this->primaryKeys[_i1287].read(iprot);
+ xfer += this->primaryKeys[_i1306].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5632,14 +5632,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->foreignKeys.clear();
- uint32_t _size1288;
- ::apache::thrift::protocol::TType _etype1291;
- xfer += iprot->readListBegin(_etype1291, _size1288);
- this->foreignKeys.resize(_size1288);
- uint32_t _i1292;
- for (_i1292 = 0; _i1292 < _size1288; ++_i1292)
+ uint32_t _size1307;
+ ::apache::thrift::protocol::TType _etype1310;
+ xfer += iprot->readListBegin(_etype1310, _size1307);
+ this->foreignKeys.resize(_size1307);
+ uint32_t _i1311;
+ for (_i1311 = 0; _i1311 < _size1307; ++_i1311)
{
- xfer += this->foreignKeys[_i1292].read(iprot);
+ xfer += this->foreignKeys[_i1311].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5652,14 +5652,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->uniqueConstraints.clear();
- uint32_t _size1293;
- ::apache::thrift::protocol::TType _etype1296;
- xfer += iprot->readListBegin(_etype1296, _size1293);
- this->uniqueConstraints.resize(_size1293);
- uint32_t _i1297;
- for (_i1297 = 0; _i1297 < _size1293; ++_i1297)
+ uint32_t _size1312;
+ ::apache::thrift::protocol::TType _etype1315;
+ xfer += iprot->readListBegin(_etype1315, _size1312);
+ this->uniqueConstraints.resize(_size1312);
+ uint32_t _i1316;
+ for (_i1316 = 0; _i1316 < _size1312; ++_i1316)
{
- xfer += this->uniqueConstraints[_i1297].read(iprot);
+ xfer += this->uniqueConstraints[_i1316].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5672,14 +5672,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->notNullConstraints.clear();
- uint32_t _size1298;
- ::apache::thrift::protocol::TType _etype1301;
- xfer += iprot->readListBegin(_etype1301, _size1298);
- this->notNullConstraints.resize(_size1298);
- uint32_t _i1302;
- for (_i1302 = 0; _i1302 < _size1298; ++_i1302)
+ uint32_t _size1317;
+ ::apache::thrift::protocol::TType _etype1320;
+ xfer += iprot->readListBegin(_etype1320, _size1317);
+ this->notNullConstraints.resize(_size1317);
+ uint32_t _i1321;
+ for (_i1321 = 0; _i1321 < _size1317; ++_i1321)
{
- xfer += this->notNullConstraints[_i1302].read(iprot);
+ xfer += this->notNullConstraints[_i1321].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5692,14 +5692,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->defaultConstraints.clear();
- uint32_t _size1303;
- ::apache::thrift::protocol::TType _etype1306;
- xfer += iprot->readListBegin(_etype1306, _size1303);
- this->defaultConstraints.resize(_size1303);
- uint32_t _i1307;
- for (_i1307 = 0; _i1307 < _size1303; ++_i1307)
+ uint32_t _size1322;
+ ::apache::thrift::protocol::TType _etype1325;
+ xfer += iprot->readListBegin(_etype1325, _size1322);
+ this->defaultConstraints.resize(_size1322);
+ uint32_t _i1326;
+ for (_i1326 = 0; _i1326 < _size1322; ++_i1326)
{
- xfer += this->defaultConstraints[_i1307].read(iprot);
+ xfer += this->defaultConstraints[_i1326].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5712,14 +5712,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->checkConstraints.clear();
- uint32_t _size1308;
- ::apache::thrift::protocol::TType _etype1311;
- xfer += iprot->readListBegin(_etype1311, _size1308);
- this->checkConstraints.resize(_size1308);
- uint32_t _i1312;
- for (_i1312 = 0; _i1312 < _size1308; ++_i1312)
+ uint32_t _size1327;
+ ::apache::thrift::protocol::TType _etype1330;
+ xfer += iprot->readListBegin(_etype1330, _size1327);
+ this->checkConstraints.resize(_size1327);
+ uint32_t _i1331;
+ for (_i1331 = 0; _i1331 < _size1327; ++_i1331)
{
- xfer += this->checkConstraints[_i1312].read(iprot);
+ xfer += this->checkConstraints[_i1331].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -5752,10 +5752,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1313;
- for (_iter1313 = this->primaryKeys.begin(); _iter1313 != this->primaryKeys.end(); ++_iter1313)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1332;
+ for (_iter1332 = this->primaryKeys.begin(); _iter1332 != this->primaryKeys.end(); ++_iter1332)
{
- xfer += (*_iter1313).write(oprot);
+ xfer += (*_iter1332).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5764,10 +5764,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1314;
- for (_iter1314 = this->foreignKeys.begin(); _iter1314 != this->foreignKeys.end(); ++_iter1314)
+ std::vector<SQLForeignKey> ::const_iterator _iter1333;
+ for (_iter1333 = this->foreignKeys.begin(); _iter1333 != this->foreignKeys.end(); ++_iter1333)
{
- xfer += (*_iter1314).write(oprot);
+ xfer += (*_iter1333).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5776,10 +5776,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1315;
- for (_iter1315 = this->uniqueConstraints.begin(); _iter1315 != this->uniqueConstraints.end(); ++_iter1315)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1334;
+ for (_iter1334 = this->uniqueConstraints.begin(); _iter1334 != this->uniqueConstraints.end(); ++_iter1334)
{
- xfer += (*_iter1315).write(oprot);
+ xfer += (*_iter1334).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5788,10 +5788,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1316;
- for (_iter1316 = this->notNullConstraints.begin(); _iter1316 != this->notNullConstraints.end(); ++_iter1316)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1335;
+ for (_iter1335 = this->notNullConstraints.begin(); _iter1335 != this->notNullConstraints.end(); ++_iter1335)
{
- xfer += (*_iter1316).write(oprot);
+ xfer += (*_iter1335).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5800,10 +5800,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->defaultConstraints.size()));
- std::vector<SQLDefaultConstraint> ::const_iterator _iter1317;
- for (_iter1317 = this->defaultConstraints.begin(); _iter1317 != this->defaultConstraints.end(); ++_iter1317)
+ std::vector<SQLDefaultConstraint> ::const_iterator _iter1336;
+ for (_iter1336 = this->defaultConstraints.begin(); _iter1336 != this->defaultConstraints.end(); ++_iter1336)
{
- xfer += (*_iter1317).write(oprot);
+ xfer += (*_iter1336).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5812,10 +5812,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->checkConstraints.size()));
- std::vector<SQLCheckConstraint> ::const_iterator _iter1318;
- for (_iter1318 = this->checkConstraints.begin(); _iter1318 != this->checkConstraints.end(); ++_iter1318)
+ std::vector<SQLCheckConstraint> ::const_iterator _iter1337;
+ for (_iter1337 = this->checkConstraints.begin(); _iter1337 != this->checkConstraints.end(); ++_iter1337)
{
- xfer += (*_iter1318).write(oprot);
+ xfer += (*_iter1337).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5843,10 +5843,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
- std::vector<SQLPrimaryKey> ::const_iterator _iter1319;
- for (_iter1319 = (*(this->primaryKeys)).begin(); _iter1319 != (*(this->primaryKeys)).end(); ++_iter1319)
+ std::vector<SQLPrimaryKey> ::const_iterator _iter1338;
+ for (_iter1338 = (*(this->primaryKeys)).begin(); _iter1338 != (*(this->primaryKeys)).end(); ++_iter1338)
{
- xfer += (*_iter1319).write(oprot);
+ xfer += (*_iter1338).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5855,10 +5855,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
- std::vector<SQLForeignKey> ::const_iterator _iter1320;
- for (_iter1320 = (*(this->foreignKeys)).begin(); _iter1320 != (*(this->foreignKeys)).end(); ++_iter1320)
+ std::vector<SQLForeignKey> ::const_iterator _iter1339;
+ for (_iter1339 = (*(this->foreignKeys)).begin(); _iter1339 != (*(this->foreignKeys)).end(); ++_iter1339)
{
- xfer += (*_iter1320).write(oprot);
+ xfer += (*_iter1339).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5867,10 +5867,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->uniqueConstraints)).size()));
- std::vector<SQLUniqueConstraint> ::const_iterator _iter1321;
- for (_iter1321 = (*(this->uniqueConstraints)).begin(); _iter1321 != (*(this->uniqueConstraints)).end(); ++_iter1321)
+ std::vector<SQLUniqueConstraint> ::const_iterator _iter1340;
+ for (_iter1340 = (*(this->uniqueConstraints)).begin(); _iter1340 != (*(this->uniqueConstraints)).end(); ++_iter1340)
{
- xfer += (*_iter1321).write(oprot);
+ xfer += (*_iter1340).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5879,10 +5879,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->notNullConstraints)).size()));
- std::vector<SQLNotNullConstraint> ::const_iterator _iter1322;
- for (_iter1322 = (*(this->notNullConstraints)).begin(); _iter1322 != (*(this->notNullConstraints)).end(); ++_iter1322)
+ std::vector<SQLNotNullConstraint> ::const_iterator _iter1341;
+ for (_iter1341 = (*(this->notNullConstraints)).begin(); _iter1341 != (*(this->notNullConstraints)).end(); ++_iter1341)
{
- xfer += (*_iter1322).write(oprot);
+ xfer += (*_iter1341).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5891,10 +5891,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->defaultConstraints)).size()));
- std::vector<SQLDefaultConstraint> ::const_iterator _iter1323;
- for (_iter1323 = (*(this->defaultConstraints)).begin(); _iter1323 != (*(this->defaultConstraints)).end(); ++_iter1323)
+ std::vector<SQLDefaultConstraint> ::const_iterator _iter1342;
+ for (_iter1342 = (*(this->defaultConstraints)).begin(); _iter1342 != (*(this->defaultConstraints)).end(); ++_iter1342)
{
- xfer += (*_iter1323).write(oprot);
+ xfer += (*_iter1342).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -5903,10 +5903,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->checkConstraints)).size()));
- std::vector<SQLCheckConstraint> ::const_iterator _iter1324;
- for (_iter1324 = (*(this->checkConstraints)).begin(); _iter1324 != (*(this->checkConstraints)).end(); ++_iter1324)
+ std::vector<SQLCheckConstraint> ::const_iterator _iter1343;
+ for (_iter1343 = (*(this->checkConstraints)).begin(); _iter1343 != (*(this->checkConstraints)).end(); ++_iter1343)
{
- xfer += (*_iter1324).write(oprot);
+ xfer += (*_iter1343).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -8074,14 +8074,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->partNames.clear();
- uint32_t _size1325;
- ::apache::thrift::protocol::TType _etype1328;
- xfer += iprot->readListBegin(_etype1328, _size1325);
- this->partNames.resize(_size1325);
- uint32_t _i1329;
- for (_i1329 = 0; _i1329 < _size1325; ++_i1329)
+ uint32_t _size1344;
+ ::apache::thrift::protocol::TType _etype1347;
+ xfer += iprot->readListBegin(_etype1347, _size1344);
+ this->partNames.resize(_size1344);
+ uint32_t _i1348;
+ for (_i1348 = 0; _i1348 < _size1344; ++_i1348)
{
- xfer += iprot->readString(this->partNames[_i1329]);
+ xfer += iprot->readString(this->partNames[_i1348]);
}
xfer += iprot->readListEnd();
}
@@ -8118,10 +8118,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
- std::vector<std::string> ::const_iterator _iter1330;
- for (_iter1330 = this->partNames.begin(); _iter1330 != this->partNames.end(); ++_iter1330)
+ std::vector<std::string> ::const_iterator _iter1349;
+ for (_iter1349 = this->partNames.begin(); _iter1349 != this->partNames.end(); ++_iter1349)
{
- xfer += oprot->writeString((*_iter1330));
+ xfer += oprot->writeString((*_iter1349));
}
xfer += oprot->writeListEnd();
}
@@ -8153,10 +8153,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
- std::vector<std::string> ::const_iterator _iter1331;
- for (_iter1331 = (*(this->partNames)).begin(); _iter1331 != (*(this->partNames)).end(); ++_iter1331)
+ std::vector<std::string> ::const_iterator _iter1350;
+ for (_iter1350 = (*(this->partNames)).begin(); _iter1350 != (*(this->partNames)).end(); ++_iter1350)
{
- xfer += oprot->writeString((*_iter1331));
+ xfer += oprot->writeString((*_iter1350));
}
xfer += oprot->writeListEnd();
}
@@ -8400,14 +8400,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1332;
- ::apache::thrift::protocol::TType _etype1335;
- xfer += iprot->readListBegin(_etype1335, _size1332);
- this->success.resize(_size1332);
- uint32_t _i1336;
- for (_i1336 = 0; _i1336 < _size1332; ++_i1336)
+ uint32_t _size1351;
+ ::apache::thrift::protocol::TType _etype1354;
+ xfer += iprot->readListBegin(_etype1354, _size1351);
+ this->success.resize(_size1351);
+ uint32_t _i1355;
+ for (_i1355 = 0; _i1355 < _size1351; ++_i1355)
{
- xfer += iprot->readString(this->success[_i1336]);
+ xfer += iprot->readString(this->success[_i1355]);
}
xfer += iprot->readListEnd();
}
@@ -8446,10 +8446,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1337;
- for (_iter1337 = this->success.begin(); _iter1337 != this->success.end(); ++_iter1337)
+ std::vector<std::string> ::const_iterator _iter1356;
+ for (_iter1356 = this->success.begin(); _iter1356 != this->success.end(); ++_iter1356)
{
- xfer += oprot->writeString((*_iter1337));
+ xfer += oprot->writeString((*_iter1356));
}
xfer += oprot->writeListEnd();
}
@@ -8494,14 +8494,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1338;
- ::apache::thrift::protocol::TType _etype1341;
- xfer += iprot->readListBegin(_etype1341, _size1338);
- (*(this->success)).resize(_size1338);
- uint32_t _i1342;
- for (_i1342 = 0; _i1342 < _size1338; ++_i1342)
+ uint32_t _size1357;
+ ::apache::thrift::protocol::TType _etype1360;
+ xfer += iprot->readListBegin(_etype1360, _size1357);
+ (*(this->success)).resize(_size1357);
+ uint32_t _i1361;
+ for (_i1361 = 0; _i1361 < _size1357; ++_i1361)
{
- xfer += iprot->readString((*(this->success))[_i1342]);
+ xfer += iprot->readString((*(this->success))[_i1361]);
}
xfer += iprot->readListEnd();
}
@@ -8671,14 +8671,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1343;
- ::apache::thrift::protocol::TType _etype1346;
- xfer += iprot->readListBegin(_etype1346, _size1343);
- this->success.resize(_size1343);
- uint32_t _i1347;
- for (_i1347 = 0; _i1347 < _size1343; ++_i1347)
+ uint32_t _size1362;
+ ::apache::thrift::protocol::TType _etype1365;
+ xfer += iprot->readListBegin(_etype1365, _size1362);
+ this->success.resize(_size1362);
+ uint32_t _i1366;
+ for (_i1366 = 0; _i1366 < _size1362; ++_i1366)
{
- xfer += iprot->readString(this->success[_i1347]);
+ xfer += iprot->readString(this->success[_i1366]);
}
xfer += iprot->readListEnd();
}
@@ -8717,10 +8717,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1348;
- for (_iter1348 = this->success.begin(); _iter1348 != this->success.end(); ++_iter1348)
+ std::vector<std::string> ::const_iterator _iter1367;
+ for (_iter1367 = this->success.begin(); _iter1367 != this->success.end(); ++_iter1367)
{
- xfer += oprot->writeString((*_iter1348));
+ xfer += oprot->writeString((*_iter1367));
}
xfer += oprot->writeListEnd();
}
@@ -8765,14 +8765,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1349;
- ::apache::thrift::protocol::TType _etype1352;
- xfer += iprot->readListBegin(_etype1352, _size1349);
- (*(this->success)).resize(_size1349);
- uint32_t _i1353;
- for (_i1353 = 0; _i1353 < _size1349; ++_i1353)
+ uint32_t _size1368;
+ ::apache::thrift::protocol::TType _etype1371;
+ xfer += iprot->readListBegin(_etype1371, _size1368);
+ (*(this->success)).resize(_size1368);
+ uint32_t _i1372;
+ for (_i1372 = 0; _i1372 < _size1368; ++_i1372)
{
- xfer += iprot->readString((*(this->success))[_i1353]);
+ xfer += iprot->readString((*(this->success))[_i1372]);
}
xfer += iprot->readListEnd();
}
@@ -8910,14 +8910,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(:
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1354;
- ::apache::thrift::protocol::TType _etype1357;
- xfer += iprot->readListBegin(_etype1357, _size1354);
- this->success.resize(_size1354);
- uint32_t _i1358;
- for (_i1358 = 0; _i1358 < _size1354; ++_i1358)
+ uint32_t _size1373;
+ ::apache::thrift::protocol::TType _etype1376;
+ xfer += iprot->readListBegin(_etype1376, _size1373);
+ this->success.resize(_size1373);
+ uint32_t _i1377;
+ for (_i1377 = 0; _i1377 < _size1373; ++_i1377)
{
- xfer += iprot->readString(this->success[_i1358]);
+ xfer += iprot->readString(this->success[_i1377]);
}
xfer += iprot->readListEnd();
}
@@ -8956,10 +8956,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write(
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1359;
- for (_iter1359 = this->success.begin(); _iter1359 != this->success.end(); ++_iter1359)
+ std::vector<std::string> ::const_iterator _iter1378;
+ for (_iter1378 = this->success.begin(); _iter1378 != this->success.end(); ++_iter1378)
{
- xfer += oprot->writeString((*_iter1359));
+ xfer += oprot->writeString((*_iter1378));
}
xfer += oprot->writeListEnd();
}
@@ -9004,14 +9004,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1360;
- ::apache::thrift::protocol::TType _etype1363;
- xfer += iprot->readListBegin(_etype1363, _size1360);
- (*(this->success)).resize(_size1360);
- uint32_t _i1364;
- for (_i1364 = 0; _i1364 < _size1360; ++_i1364)
+ uint32_t _size1379;
+ ::apache::thrift::protocol::TType _etype1382;
+ xfer += iprot->readListBegin(_etype1382, _size1379);
+ (*(this->success)).resize(_size1379);
+ uint32_t _i1383;
+ for (_i1383 = 0; _i1383 < _size1379; ++_i1383)
{
- xfer += iprot->readString((*(this->success))[_i1364]);
+ xfer += iprot->readString((*(this->success))[_i1383]);
}
xfer += iprot->readListEnd();
}
@@ -9086,14 +9086,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_types.clear();
- uint32_t _size1365;
- ::apache::thrift::protocol::TType _etype1368;
- xfer += iprot->readListBegin(_etype1368, _size1365);
- this->tbl_types.resize(_size1365);
- uint32_t _i1369;
- for (_i1369 = 0; _i1369 < _size1365; ++_i1369)
+ uint32_t _size1384;
+ ::apache::thrift::protocol::TType _etype1387;
+ xfer += iprot->readListBegin(_etype1387, _size1384);
+ this->tbl_types.resize(_size1384);
+ uint32_t _i1388;
+ for (_i1388 = 0; _i1388 < _size1384; ++_i1388)
{
- xfer += iprot->readString(this->tbl_types[_i1369]);
+ xfer += iprot->readString(this->tbl_types[_i1388]);
}
xfer += iprot->readListEnd();
}
@@ -9130,10 +9130,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
- std::vector<std::string> ::const_iterator _iter1370;
- for (_iter1370 = this->tbl_types.begin(); _iter1370 != this->tbl_types.end(); ++_iter1370)
+ std::vector<std::string> ::const_iterator _iter1389;
+ for (_iter1389 = this->tbl_types.begin(); _iter1389 != this->tbl_types.end(); ++_iter1389)
{
- xfer += oprot->writeString((*_iter1370));
+ xfer += oprot->writeString((*_iter1389));
}
xfer += oprot->writeListEnd();
}
@@ -9165,10 +9165,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
- std::vector<std::string> ::const_iterator _iter1371;
- for (_iter1371 = (*(this->tbl_types)).begin(); _iter1371 != (*(this->tbl_types)).end(); ++_iter1371)
+ std::vector<std::string> ::const_iterator _iter1390;
+ for (_iter1390 = (*(this->tbl_types)).begin(); _iter1390 != (*(this->tbl_types)).end(); ++_iter1390)
{
- xfer += oprot->writeString((*_iter1371));
+ xfer += oprot->writeString((*_iter1390));
}
xfer += oprot->writeListEnd();
}
@@ -9209,14 +9209,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1372;
- ::apache::thrift::protocol::TType _etype1375;
- xfer += iprot->readListBegin(_etype1375, _size1372);
- this->success.resize(_size1372);
- uint32_t _i1376;
- for (_i1376 = 0; _i1376 < _size1372; ++_i1376)
+ uint32_t _size1391;
+ ::apache::thrift::protocol::TType _etype1394;
+ xfer += iprot->readListBegin(_etype1394, _size1391);
+ this->success.resize(_size1391);
+ uint32_t _i1395;
+ for (_i1395 = 0; _i1395 < _size1391; ++_i1395)
{
- xfer += this->success[_i1376].read(iprot);
+ xfer += this->success[_i1395].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9255,10 +9255,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<TableMeta> ::const_iterator _iter1377;
- for (_iter1377 = this->success.begin(); _iter1377 != this->success.end(); ++_iter1377)
+ std::vector<TableMeta> ::const_iterator _iter1396;
+ for (_iter1396 = this->success.begin(); _iter1396 != this->success.end(); ++_iter1396)
{
- xfer += (*_iter1377).write(oprot);
+ xfer += (*_iter1396).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -9303,14 +9303,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1378;
- ::apache::thrift::protocol::TType _etype1381;
- xfer += iprot->readListBegin(_etype1381, _size1378);
- (*(this->success)).resize(_size1378);
- uint32_t _i1382;
- for (_i1382 = 0; _i1382 < _size1378; ++_i1382)
+ uint32_t _size1397;
+ ::apache::thrift::protocol::TType _etype1400;
+ xfer += iprot->readListBegin(_etype1400, _size1397);
+ (*(this->success)).resize(_size1397);
+ uint32_t _i1401;
+ for (_i1401 = 0; _i1401 < _size1397; ++_i1401)
{
- xfer += (*(this->success))[_i1382].read(iprot);
+ xfer += (*(this->success))[_i1401].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -9448,14 +9448,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1383;
- ::apache::thrift::protocol::TType _etype1386;
- xfer += iprot->readListBegin(_etype1386, _size1383);
- this->success.resize(_size1383);
- uint32_t _i1387;
- for (_i1387 = 0; _i1387 < _size1383; ++_i1387)
+ uint32_t _size1402;
+ ::apache::thrift::protocol::TType _etype1405;
+ xfer += iprot->readListBegin(_etype1405, _size1402);
+ this->success.resize(_size1402);
+ uint32_t _i1406;
+ for (_i1406 = 0; _i1406 < _size1402; ++_i1406)
{
- xfer += iprot->readString(this->success[_i1387]);
+ xfer += iprot->readString(this->success[_i1406]);
}
xfer += iprot->readListEnd();
}
@@ -9494,10 +9494,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1388;
- for (_iter1388 = this->success.begin(); _iter1388 != this->success.end(); ++_iter1388)
+ std::vector<std::string> ::const_iterator _iter1407;
+ for (_iter1407 = this->success.begin(); _iter1407 != this->success.end(); ++_iter1407)
{
- xfer += oprot->writeString((*_iter1388));
+ xfer += oprot->writeString((*_iter1407));
}
xfer += oprot->writeListEnd();
}
@@ -9542,14 +9542,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1389;
- ::apache::thrift::protocol::TType _etype1392;
- xfer += iprot->readListBegin(_etype1392, _size1389);
- (*(this->success)).resize(_size1389);
- uint32_t _i1393;
- for (_i1393 = 0; _i1393 < _size1389; ++_i1393)
+ uint32_t _size1408;
+ ::apache::thrift::protocol::TType _etype1411;
+ xfer += iprot->readListBegin(_etype1411, _size1408);
+ (*(this->success)).resize(_size1408);
+ uint32_t _i1412;
+ for (_i1412 = 0; _i1412 < _size1408; ++_i1412)
{
- xfer += iprot->readString((*(this->success))[_i1393]);
+ xfer += iprot->readString((*(this->success))[_i1412]);
}
xfer += iprot->readListEnd();
}
@@ -9859,14 +9859,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size1394;
- ::apache::thrift::protocol::TType _etype1397;
- xfer += iprot->readListBegin(_etype1397, _size1394);
- this->tbl_names.resize(_size1394);
- uint32_t _i1398;
- for (_i1398 = 0; _i1398 < _size1394; ++_i1398)
+ uint32_t _size1413;
+ ::apache::thrift::protocol::TType _etype1416;
+ xfer += iprot->readListBegin(_etype1416, _size1413);
+ this->tbl_names.resize(_size1413);
+ uint32_t _i1417;
+ for (_i1417 = 0; _i1417 < _size1413; ++_i1417)
{
- xfer += iprot->readString(this->tbl_names[_i1398]);
+ xfer += iprot->readString(this->tbl_names[_i1417]);
}
xfer += iprot->readListEnd();
}
@@ -9899,10 +9899,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter1399;
- for (_iter1399 = this->tbl_names.begin(); _iter1399 != this->tbl_names.end(); ++_iter1399)
+ std::vector<std::string> ::const_iterator _iter1418;
+ for (_iter1418 = this->tbl_names.begin(); _iter1418 != this->tbl_names.end(); ++_iter1418)
{
- xfer += oprot->writeString((*_iter1399));
+ xfer += oprot->writeString((*_iter1418));
}
xfer += oprot->writeListEnd();
}
@@ -9930,10 +9930,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter1400;
- for (_iter1400 = (*(this->tbl_names)).begin(); _iter1400 != (*(this->tbl_names)).end(); ++_iter1400)
+ std::vector<std::string> ::const_iterator _iter1419;
+ for (_iter1419 = (*(this->tbl_names)).begin(); _iter1419 != (*(this->tbl_names)).end(); ++_iter1419)
{
- xfer += oprot->writeString((*_iter1400));
+ xfer += oprot->writeString((*_iter1419));
}
xfer += oprot->writeListEnd();
}
@@ -9974,14 +9974,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1401;
- ::apache::thrift::protocol::TType _etype1404;
- xfer += iprot->readListBegin(_etype1404, _size1401);
- this->success.resize(_size1401);
- uint32_t _i1405;
- for (_i1405 = 0; _i1405 < _size1401; ++_i1405)
+ uint32_t _size1420;
+ ::apache::thrift::protocol::TType _etype1423;
+ xfer += iprot->readListBegin(_etype1423, _size1420);
+ this->success.resize(_size1420);
+ uint32_t _i1424;
+ for (_i1424 = 0; _i1424 < _size1420; ++_i1424)
{
- xfer += this->success[_i1405].read(iprot);
+ xfer += this->success[_i1424].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10012,10 +10012,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::vector<Table> ::const_iterator _iter1406;
- for (_iter1406 = this->success.begin(); _iter1406 != this->success.end(); ++_iter1406)
+ std::vector<Table> ::const_iterator _iter1425;
+ for (_iter1425 = this->success.begin(); _iter1425 != this->success.end(); ++_iter1425)
{
- xfer += (*_iter1406).write(oprot);
+ xfer += (*_iter1425).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -10056,14 +10056,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1407;
- ::apache::thrift::protocol::TType _etype1410;
- xfer += iprot->readListBegin(_etype1410, _size1407);
- (*(this->success)).resize(_size1407);
- uint32_t _i1411;
- for (_i1411 = 0; _i1411 < _size1407; ++_i1411)
+ uint32_t _size1426;
+ ::apache::thrift::protocol::TType _etype1429;
+ xfer += iprot->readListBegin(_etype1429, _size1426);
+ (*(this->success)).resize(_size1426);
+ uint32_t _i1430;
+ for (_i1430 = 0; _i1430 < _size1426; ++_i1430)
{
- xfer += (*(this->success))[_i1411].read(iprot);
+ xfer += (*(this->success))[_i1430].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -10596,14 +10596,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(::
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->tbl_names.clear();
- uint32_t _size1412;
- ::apache::thrift::protocol::TType _etype1415;
- xfer += iprot->readListBegin(_etype1415, _size1412);
- this->tbl_names.resize(_size1412);
- uint32_t _i1416;
- for (_i1416 = 0; _i1416 < _size1412; ++_i1416)
+ uint32_t _size1431;
+ ::apache::thrift::protocol::TType _etype1434;
+ xfer += iprot->readListBegin(_etype1434, _size1431);
+ this->tbl_names.resize(_size1431);
+ uint32_t _i1435;
+ for (_i1435 = 0; _i1435 < _size1431; ++_i1435)
{
- xfer += iprot->readString(this->tbl_names[_i1416]);
+ xfer += iprot->readString(this->tbl_names[_i1435]);
}
xfer += iprot->readListEnd();
}
@@ -10636,10 +10636,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(:
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
- std::vector<std::string> ::const_iterator _iter1417;
- for (_iter1417 = this->tbl_names.begin(); _iter1417 != this->tbl_names.end(); ++_iter1417)
+ std::vector<std::string> ::const_iterator _iter1436;
+ for (_iter1436 = this->tbl_names.begin(); _iter1436 != this->tbl_names.end(); ++_iter1436)
{
- xfer += oprot->writeString((*_iter1417));
+ xfer += oprot->writeString((*_iter1436));
}
xfer += oprot->writeListEnd();
}
@@ -10667,10 +10667,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write(
xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
- std::vector<std::string> ::const_iterator _iter1418;
- for (_iter1418 = (*(this->tbl_names)).begin(); _iter1418 != (*(this->tbl_names)).end(); ++_iter1418)
+ std::vector<std::string> ::const_iterator _iter1437;
+ for (_iter1437 = (*(this->tbl_names)).begin(); _iter1437 != (*(this->tbl_names)).end(); ++_iter1437)
{
- xfer += oprot->writeString((*_iter1418));
+ xfer += oprot->writeString((*_iter1437));
}
xfer += oprot->writeListEnd();
}
@@ -10711,17 +10711,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read(
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->success.clear();
- uint32_t _size1419;
- ::apache::thrift::protocol::TType _ktype1420;
- ::apache::thrift::protocol::TType _vtype1421;
- xfer += iprot->readMapBegin(_ktype1420, _vtype1421, _size1419);
- uint32_t _i1423;
- for (_i1423 = 0; _i1423 < _size1419; ++_i1423)
+ uint32_t _size1438;
+ ::apache::thrift::protocol::TType _ktype1439;
+ ::apache::thrift::protocol::TType _vtype1440;
+ xfer += iprot->readMapBegin(_ktype1439, _vtype1440, _size1438);
+ uint32_t _i1442;
+ for (_i1442 = 0; _i1442 < _size1438; ++_i1442)
{
- std::string _key1424;
- xfer += iprot->readString(_key1424);
- Materialization& _val1425 = this->success[_key1424];
- xfer += _val1425.read(iprot);
+ std::string _key1443;
+ xfer += iprot->readString(_key1443);
+ Materialization& _val1444 = this->success[_key1443];
+ xfer += _val1444.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -10776,11 +10776,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
- std::map<std::string, Materialization> ::const_iterator _iter1426;
- for (_iter1426 = this->success.begin(); _iter1426 != this->success.end(); ++_iter1426)
+ std::map<std::string, Materialization> ::const_iterator _iter1445;
+ for (_iter1445 = this->success.begin(); _iter1445 != this->success.end(); ++_iter1445)
{
- xfer += oprot->writeString(_iter1426->first);
- xfer += _iter1426->second.write(oprot);
+ xfer += oprot->writeString(_iter1445->first);
+ xfer += _iter1445->second.write(oprot);
}
xfer += oprot->writeMapEnd();
}
@@ -10833,17 +10833,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
(*(this->success)).clear();
- uint32_t _size1427;
- ::apache::thrift::protocol::TType _ktype1428;
- ::apache::thrift::protocol::TType _vtype1429;
- xfer += iprot->readMapBegin(_ktype1428, _vtype1429, _size1427);
- uint32_t _i1431;
- for (_i1431 = 0; _i1431 < _size1427; ++_i1431)
+ uint32_t _size1446;
+ ::apache::thrift::protocol::TType _ktype1447;
+ ::apache::thrift::protocol::TType _vtype1448;
+ xfer += iprot->readMapBegin(_ktype1447, _vtype1448, _size1446);
+ uint32_t _i1450;
+ for (_i1450 = 0; _i1450 < _size1446; ++_i1450)
{
- std::string _key1432;
- xfer += iprot->readString(_key1432);
- Materialization& _val1433 = (*(this->success))[_key1432];
- xfer += _val1433.read(iprot);
+ std::string _key1451;
+ xfer += iprot->readString(_key1451);
+ Materialization& _val1452 = (*(this->success))[_key1451];
+ xfer += _val1452.read(iprot);
}
xfer += iprot->readMapEnd();
}
@@ -11304,14 +11304,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->success.clear();
- uint32_t _size1434;
- ::apache::thrift::protocol::TType _etype1437;
- xfer += iprot->readListBegin(_etype1437, _size1434);
- this->success.resize(_size1434);
- uint32_t _i1438;
- for (_i1438 = 0; _i1438 < _size1434; ++_i1438)
+ uint32_t _size1453;
+ ::apache::thrift::protocol::TType _etype1456;
+ xfer += iprot->readListBegin(_etype1456, _size1453);
+ this->success.resize(_size1453);
+ uint32_t _i1457;
+ for (_i1457 = 0; _i1457 < _size1453; ++_i1457)
{
- xfer += iprot->readString(this->success[_i1438]);
+ xfer += iprot->readString(this->success[_i1457]);
}
xfer += iprot->readListEnd();
}
@@ -11366,10 +11366,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
- std::vector<std::string> ::const_iterator _iter1439;
- for (_iter1439 = this->success.begin(); _iter1439 != this->success.end(); ++_iter1439)
+ std::vector<std::string> ::const_iterator _iter1458;
+ for (_iter1458 = this->success.begin(); _iter1458 != this->success.end(); ++_iter1458)
{
- xfer += oprot->writeString((*_iter1439));
+ xfer += oprot->writeString((*_iter1458));
}
xfer += oprot->writeListEnd();
}
@@ -11422,14 +11422,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
(*(this->success)).clear();
- uint32_t _size1440;
- ::apache::thrift::protocol::TType _etype1443;
- xfer += iprot->readListBegin(_etype1443, _size1440);
- (*(this->success)).resize(_size1440);
- uint32_t _i1444;
- for (_i1444 = 0; _i1444 < _size1440; ++_i1444)
+ uint32_t _size1459;
+ ::apache::thrift::protocol::TType _etype1462;
+ xfer += iprot->readListBegin(_etype1462, _size1459);
+ (*(this->success)).resize(_size1459);
+ uint32_t _i1463;
+ for (_i1463 = 0; _i1463 < _size1459; ++_i1463)
{
- xfer += iprot->readString((*(this->success))[_i1444]);
+ xfer += iprot->readString((*(this->success))[_i1463]);
}
xfer += iprot->readListEnd();
}
@@ -12763,14 +12763,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1445;
- ::apache::thrift::protocol::TType _etype1448;
- xfer += iprot->readListBegin(_etype1448, _size1445);
- this->new_parts.resize(_size1445);
- uint32_t _i1449;
- for (_i1449 = 0; _i1449 < _size1445; ++_i1449)
+ uint32_t _size1464;
+ ::apache::thrift::protocol::TType _etype1467;
+ xfer += iprot->readListBegin(_etype1467, _size1464);
+ this->new_parts.resize(_size1464);
+ uint32_t _i1468;
+ for (_i1468 = 0; _i1468 < _size1464; ++_i1468)
{
- xfer += this->new_parts[_i1449].read(iprot);
+ xfer += this->new_parts[_i1468].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -12799,10 +12799,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<Partition> ::const_iterator _iter1450;
- for (_iter1450 = this->new_parts.begin(); _iter1450 != this->new_parts.end(); ++_iter1450)
+ std::vector<Partition> ::const_iterator _iter1469;
+ for (_iter1469 = this->new_parts.begin(); _iter1469 != this->new_parts.end(); ++_iter1469)
{
- xfer += (*_iter1450).write(oprot);
+ xfer += (*_iter1469).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -12826,10 +12826,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<Partition> ::const_iterator _iter1451;
- for (_iter1451 = (*(this->new_parts)).begin(); _iter1451 != (*(this->new_parts)).end(); ++_iter1451)
+ std::vector<Partition> ::const_iterator _iter1470;
+ for (_iter1470 = (*(this->new_parts)).begin(); _iter1470 != (*(this->new_parts)).end(); ++_iter1470)
{
- xfer += (*_iter1451).write(oprot);
+ xfer += (*_iter1470).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13038,14 +13038,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->new_parts.clear();
- uint32_t _size1452;
- ::apache::thrift::protocol::TType _etype1455;
- xfer += iprot->readListBegin(_etype1455, _size1452);
- this->new_parts.resize(_size1452);
- uint32_t _i1456;
- for (_i1456 = 0; _i1456 < _size1452; ++_i1456)
+ uint32_t _size1471;
+ ::apache::thrift::protocol::TType _etype1474;
+ xfer += iprot->readListBegin(_etype1474, _size1471);
+ this->new_parts.resize(_size1471);
+ uint32_t _i1475;
+ for (_i1475 = 0; _i1475 < _size1471; ++_i1475)
{
- xfer += this->new_parts[_i1456].read(iprot);
+ xfer += this->new_parts[_i1475].read(iprot);
}
xfer += iprot->readListEnd();
}
@@ -13074,10 +13074,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
- std::vector<PartitionSpec> ::const_iterator _iter1457;
- for (_iter1457 = this->new_parts.begin(); _iter1457 != this->new_parts.end(); ++_iter1457)
+ std::vector<PartitionSpec> ::const_iterator _iter1476;
+ for (_iter1476 = this->new_parts.begin(); _iter1476 != this->new_parts.end(); ++_iter1476)
{
- xfer += (*_iter1457).write(oprot);
+ xfer += (*_iter1476).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13101,10 +13101,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
- std::vector<PartitionSpec> ::const_iterator _iter1458;
- for (_iter1458 = (*(this->new_parts)).begin(); _iter1458 != (*(this->new_parts)).end(); ++_iter1458)
+ std::vector<PartitionSpec> ::const_iterator _iter1477;
+ for (_iter1477 = (*(this->new_parts)).begin(); _iter1477 != (*(this->new_parts)).end(); ++_iter1477)
{
- xfer += (*_iter1458).write(oprot);
+ xfer += (*_iter1477).write(oprot);
}
xfer += oprot->writeListEnd();
}
@@ -13329,14 +13329,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1459;
- ::apache::thrift::protocol::TType _etype1462;
- xfer += iprot->readListBegin(_etype1462, _size1459);
- this->part_vals.resize(_size1459);
- uint32_t _i1463;
- for (_i1463 = 0; _i1463 < _size1459; ++_i1463)
+ uint32_t _size1478;
+ ::apache::thrift::protocol::TType _etype1481;
+ xfer += iprot->readListBegin(_etype1481, _size1478);
+ this->part_vals.resize(_size1478);
+ uint32_t _i1482;
+ for (_i1482 = 0; _i1482 < _size1478; ++_i1482)
{
- xfer += iprot->readString(this->part_vals[_i1463]);
+ xfer += iprot->readString(this->part_vals[_i1482]);
}
xfer += iprot->readListEnd();
}
@@ -13373,10 +13373,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1464;
- for (_iter1464 = this->part_vals.begin(); _iter1464 != this->part_vals.end(); ++_iter1464)
+ std::vector<std::string> ::const_iterator _iter1483;
+ for (_iter1483 = this->part_vals.begin(); _iter1483 != this->part_vals.end(); ++_iter1483)
{
- xfer += oprot->writeString((*_iter1464));
+ xfer += oprot->writeString((*_iter1483));
}
xfer += oprot->writeListEnd();
}
@@ -13408,10 +13408,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1465;
- for (_iter1465 = (*(this->part_vals)).begin(); _iter1465 != (*(this->part_vals)).end(); ++_iter1465)
+ std::vector<std::string> ::const_iterator _iter1484;
+ for (_iter1484 = (*(this->part_vals)).begin(); _iter1484 != (*(this->part_vals)).end(); ++_iter1484)
{
- xfer += oprot->writeString((*_iter1465));
+ xfer += oprot->writeString((*_iter1484));
}
xfer += oprot->writeListEnd();
}
@@ -13883,14 +13883,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1466;
- ::apache::thrift::protocol::TType _etype1469;
- xfer += iprot->readListBegin(_etype1469, _size1466);
- this->part_vals.resize(_size1466);
- uint32_t _i1470;
- for (_i1470 = 0; _i1470 < _size1466; ++_i1470)
+ uint32_t _size1485;
+ ::apache::thrift::protocol::TType _etype1488;
+ xfer += iprot->readListBegin(_etype1488, _size1485);
+ this->part_vals.resize(_size1485);
+ uint32_t _i1489;
+ for (_i1489 = 0; _i1489 < _size1485; ++_i1489)
{
- xfer += iprot->readString(this->part_vals[_i1470]);
+ xfer += iprot->readString(this->part_vals[_i1489]);
}
xfer += iprot->readListEnd();
}
@@ -13935,10 +13935,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1471;
- for (_iter1471 = this->part_vals.begin(); _iter1471 != this->part_vals.end(); ++_iter1471)
+ std::vector<std::string> ::const_iterator _iter1490;
+ for (_iter1490 = this->part_vals.begin(); _iter1490 != this->part_vals.end(); ++_iter1490)
{
- xfer += oprot->writeString((*_iter1471));
+ xfer += oprot->writeString((*_iter1490));
}
xfer += oprot->writeListEnd();
}
@@ -13974,10 +13974,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1472;
- for (_iter1472 = (*(this->part_vals)).begin(); _iter1472 != (*(this->part_vals)).end(); ++_iter1472)
+ std::vector<std::string> ::const_iterator _iter1491;
+ for (_iter1491 = (*(this->part_vals)).begin(); _iter1491 != (*(this->part_vals)).end(); ++_iter1491)
{
- xfer += oprot->writeString((*_iter1472));
+ xfer += oprot->writeString((*_iter1491));
}
xfer += oprot->writeListEnd();
}
@@ -14780,14 +14780,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1473;
- ::apache::thrift::protocol::TType _etype1476;
- xfer += iprot->readListBegin(_etype1476, _size1473);
- this->part_vals.resize(_size1473);
- uint32_t _i1477;
- for (_i1477 = 0; _i1477 < _size1473; ++_i1477)
+ uint32_t _size1492;
+ ::apache::thrift::protocol::TType _etype1495;
+ xfer += iprot->readListBegin(_etype1495, _size1492);
+ this->part_vals.resize(_size1492);
+ uint32_t _i1496;
+ for (_i1496 = 0; _i1496 < _size1492; ++_i1496)
{
- xfer += iprot->readString(this->part_vals[_i1477]);
+ xfer += iprot->readString(this->part_vals[_i1496]);
}
xfer += iprot->readListEnd();
}
@@ -14832,10 +14832,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1478;
- for (_iter1478 = this->part_vals.begin(); _iter1478 != this->part_vals.end(); ++_iter1478)
+ std::vector<std::string> ::const_iterator _iter1497;
+ for (_iter1497 = this->part_vals.begin(); _iter1497 != this->part_vals.end(); ++_iter1497)
{
- xfer += oprot->writeString((*_iter1478));
+ xfer += oprot->writeString((*_iter1497));
}
xfer += oprot->writeListEnd();
}
@@ -14871,10 +14871,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1479;
- for (_iter1479 = (*(this->part_vals)).begin(); _iter1479 != (*(this->part_vals)).end(); ++_iter1479)
+ std::vector<std::string> ::const_iterator _iter1498;
+ for (_iter1498 = (*(this->part_vals)).begin(); _iter1498 != (*(this->part_vals)).end(); ++_iter1498)
{
- xfer += oprot->writeString((*_iter1479));
+ xfer += oprot->writeString((*_iter1498));
}
xfer += oprot->writeListEnd();
}
@@ -15083,14 +15083,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1480;
- ::apache::thrift::protocol::TType _etype1483;
- xfer += iprot->readListBegin(_etype1483, _size1480);
- this->part_vals.resize(_size1480);
- uint32_t _i1484;
- for (_i1484 = 0; _i1484 < _size1480; ++_i1484)
+ uint32_t _size1499;
+ ::apache::thrift::protocol::TType _etype1502;
+ xfer += iprot->readListBegin(_etype1502, _size1499);
+ this->part_vals.resize(_size1499);
+ uint32_t _i1503;
+ for (_i1503 = 0; _i1503 < _size1499; ++_i1503)
{
- xfer += iprot->readString(this->part_vals[_i1484]);
+ xfer += iprot->readString(this->part_vals[_i1503]);
}
xfer += iprot->readListEnd();
}
@@ -15143,10 +15143,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1485;
- for (_iter1485 = this->part_vals.begin(); _iter1485 != this->part_vals.end(); ++_iter1485)
+ std::vector<std::string> ::const_iterator _iter1504;
+ for (_iter1504 = this->part_vals.begin(); _iter1504 != this->part_vals.end(); ++_iter1504)
{
- xfer += oprot->writeString((*_iter1485));
+ xfer += oprot->writeString((*_iter1504));
}
xfer += oprot->writeListEnd();
}
@@ -15186,10 +15186,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1486;
- for (_iter1486 = (*(this->part_vals)).begin(); _iter1486 != (*(this->part_vals)).end(); ++_iter1486)
+ std::vector<std::string> ::const_iterator _iter1505;
+ for (_iter1505 = (*(this->part_vals)).begin(); _iter1505 != (*(this->part_vals)).end(); ++_iter1505)
{
- xfer += oprot->writeString((*_iter1486));
+ xfer += oprot->writeString((*_iter1505));
}
xfer += oprot->writeListEnd();
}
@@ -16195,14 +16195,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
if (ftype == ::apache::thrift::protocol::T_LIST) {
{
this->part_vals.clear();
- uint32_t _size1487;
- ::apache::thrift::protocol::TType _etype1490;
- xfer += iprot->readListBegin(_etype1490, _size1487);
- this->part_vals.resize(_size1487);
- uint32_t _i1491;
- for (_i1491 = 0; _i1491 < _size1487; ++_i1491)
+ uint32_t _size1506;
+ ::apache::thrift::protocol::TType _etype1509;
+ xfer += iprot->readListBegin(_etype1509, _size1506);
+ this->part_vals.resize(_size1506);
+ uint32_t _i1510;
+ for (_i1510 = 0; _i1510 < _size1506; ++_i1510)
{
- xfer += iprot->readString(this->part_vals[_i1491]);
+ xfer += iprot->readString(this->part_vals[_i1510]);
}
xfer += iprot->readListEnd();
}
@@ -16239,10 +16239,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
- std::vector<std::string> ::const_iterator _iter1492;
- for (_iter1492 = this->part_vals.begin(); _iter1492 != this->part_vals.end(); ++_iter1492)
+ std::vector<std::string> ::const_iterator _iter1511;
+ for (_iter1511 = this->part_vals.begin(); _iter1511 != this->part_vals.end(); ++_iter1511)
{
- xfer += oprot->writeString((*_iter1492));
+ xfer += oprot->writeString((*_iter1511));
}
xfer += oprot->writeListEnd();
}
@@ -16274,10 +16274,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
- std::vector<std::string> ::const_iterator _iter1493;
- for (_iter1493 = (*(this->part_vals)).begin(); _iter1493 != (*(this->part_vals)).end(); ++_iter1493)
+ std::vector<std::string> ::const_iterator _iter1512;
+ for (_iter1512 = (*(this->part_vals)).begin(); _iter1512 != (*(this->part_vals)).end(); ++_iter1512)
{
- xfer += oprot->writeString((*_iter1493));
+ xfer += oprot->writeString((*_iter1512));
}
xfer += oprot->writeListEnd();
<TRUNCATED>
[51/67] [abbrv] hive git commit: HIVE-19532 : 04 patch (Steve Yeom)
Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 4e3068d..f2642cf 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hive.metastore.txn;
+import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.classification.RetrySemantics;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -576,8 +577,8 @@ class CompactionTxnHandler extends TxnHandler {
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
stmt = dbConn.createStatement();
String s = "select txn_id from TXNS where " +
- "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
- "txn_state = '" + TXN_ABORTED + "'";
+ "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
+ "txn_state = '" + TXN_ABORTED + "'";
LOG.debug("Going to execute query <" + s + ">");
rs = stmt.executeQuery(s);
List<Long> txnids = new ArrayList<>();
@@ -587,10 +588,71 @@ class CompactionTxnHandler extends TxnHandler {
return;
}
Collections.sort(txnids);//easier to read logs
+
List<String> queries = new ArrayList<>();
StringBuilder prefix = new StringBuilder();
StringBuilder suffix = new StringBuilder();
+ // Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and PARTITIONS
+ for (Long txnId : txnids) {
+ // Get table ids for the current txnId.
+ s = "select tbl_id from TBLS where txn_id = " + txnId;
+ LOG.debug("Going to execute query <" + s + ">");
+ rs = stmt.executeQuery(s);
+ List<Long> tblIds = new ArrayList<>();
+ while (rs.next()) {
+ tblIds.add(rs.getLong(1));
+ }
+ close(rs);
+ if(tblIds.size() <= 0) {
+ continue;
+ }
+
+ // Turn off COLUMN_STATS_ACCURATE.BASIC_STATS for each tableId by deleting the param.
+ prefix.append("delete from TABLE_PARAMS " +
+ " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
+ suffix.append("");
+ TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, tblIds, "tbl_id", true, false);
+
+ for (String query : queries) {
+ LOG.debug("Going to execute update <" + query + ">");
+ int rc = stmt.executeUpdate(query);
+ LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from TBLS");
+ }
+
+ queries.clear();
+ prefix.setLength(0);
+ suffix.setLength(0);
+
+ // Get partition ids for the current txnId.
+ s = "select part_id from PARTITIONS where txn_id = " + txnId;
+ LOG.debug("Going to execute query <" + s + ">");
+ rs = stmt.executeQuery(s);
+ List<Long> ptnIds = new ArrayList<>();
+ while (rs.next()) ptnIds.add(rs.getLong(1));
+ close(rs);
+ if(ptnIds.size() <= 0) {
+ continue;
+ }
+
+ // Turn off COLUMN_STATS_ACCURATE.BASIC_STATS for each ptnId by deleting the param.
+ prefix.append("delete from PARTITION_PARAMS " +
+ " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and ");
+ suffix.append("");
+ TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, ptnIds, "part_id", true, false);
+
+ for (String query : queries) {
+ LOG.debug("Going to execute update <" + query + ">");
+ int rc = stmt.executeUpdate(query);
+ LOG.info("Turned off " + rc + " COLUMN_STATE_ACCURATE.BASIC_STATS states from PARTITIONS");
+ }
+
+ queries.clear();
+ prefix.setLength(0);
+ suffix.setLength(0);
+ }
+
+ // Delete from TXNS.
prefix.append("delete from TXNS where ");
suffix.append("");
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 50bfca3..bfbd928 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -28,9 +28,12 @@ import java.sql.Statement;
import java.util.Properties;
import com.google.common.annotations.VisibleForTesting;
+import jline.internal.Log;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.zookeeper.txn.TxnHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -195,6 +198,68 @@ public final class TxnDbUtil {
);
try {
+ stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " +
+ " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " +
+ " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " +
+ " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " +
+ " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " +
+ " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', \"TXN_ID\" BIGINT DEFAULT 0, " +
+ " \"WRITEID_LIST\" CLOB, " +
+ " PRIMARY KEY (TBL_ID))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+ LOG.info("TBLS table already exist, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
+ stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
+ " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
+ " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +
+ " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, \"TXN_ID\" BIGINT DEFAULT 0, " +
+ " \"WRITEID_LIST\" CLOB, " +
+ " PRIMARY KEY (PART_ID))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+ LOG.info("PARTITIONS table already exist, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
+ stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" +
+ " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
+ " \"PARAM_VALUE\" CLOB, " +
+ " PRIMARY KEY (TBL_ID, PARAM_KEY))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+ LOG.info("TABLE_PARAMS table already exist, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
+ stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" +
+ " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
+ " \"PARAM_VALUE\" VARCHAR(4000), " +
+ " PRIMARY KEY (PART_ID, PARAM_KEY))"
+ );
+ } catch (SQLException e) {
+ if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+ LOG.info("PARTITION_PARAMS table already exist, ignoring");
+ } else {
+ throw e;
+ }
+ }
+
+ try {
stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
"NULL, \"NEXT_VAL\" BIGINT NOT NULL)"
@@ -376,6 +441,35 @@ public final class TxnDbUtil {
}
/**
+ * Return true if the transaction with the given txnId is open or aborted,
+ * i.e. a row for it still exists in the TXNS table.
+ * @param conf Configuration used to connect to the metastore database
+ * @param txnId transaction id to search for
+ * @return true if a TXNS row exists for txnId, false otherwise
+ * @throws Exception
+ */
+ public static boolean isOpenOrAbortedTransaction(Configuration conf, long txnId) throws Exception {
+ Connection conn = null;
+ PreparedStatement stmt = null;
+ ResultSet rs = null;
+ try {
+ conn = getConnection(conf);
+ conn.setAutoCommit(false);
+ conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+
+ stmt = conn.prepareStatement("SELECT txn_id FROM TXNS WHERE txn_id = ?");
+ stmt.setLong(1, txnId);
+ rs = stmt.executeQuery();
+ return rs.next();
+ } finally {
+ closeResources(conn, stmt, rs);
+ }
+ }
+
+ /**
* Utility method used to run COUNT queries like "select count(*) from ..." against metastore tables
* @param countQuery countQuery text
* @return count countQuery result
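
The new helper above is mainly useful in tests that need to assert whether a transaction row is still present after a commit, abort, or cleanup cycle. A minimal sketch of such a check, assuming a JUnit-style test with a Configuration already wired to the embedded test metastore and a placeholder transaction id:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
    import org.junit.Assert;

    public class TxnPresenceCheckSketch {
      // Illustrative test fragment; the txnId value is a placeholder.
      static void assertTxnGone(long txnId) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // true while the txn is open or aborted (row still in TXNS), false once it is removed
        boolean stillPresent = TxnDbUtil.isOpenOrAbortedTransaction(conf, txnId);
        Assert.assertFalse("txn " + txnId + " should no longer be in TXNS", stillPresent);
      }
    }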
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index fa291d5..aac5811 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -25,11 +25,7 @@ import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.api.*;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.utils.JavaUtils;
@@ -46,6 +42,12 @@ import java.util.Map;
public class TxnUtils {
private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class);
+ // Transactional stats states
+ static final public char STAT_OPEN = 'o';
+ static final public char STAT_INVALID = 'i';
+ static final public char STAT_COMMITTED = 'c';
+ static final public char STAT_OBSOLETE = 's';
+
/**
* Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a
* {@link org.apache.hadoop.hive.common.ValidTxnList}. This assumes that the caller intends to
@@ -223,6 +225,14 @@ public class TxnUtils {
return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
}
+ public static boolean isTransactionalTable(Map<String, String> parameters) {
+ if (parameters == null) {
+ return false;
+ }
+ String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+ return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+ }
+
/**
* Should produce the same result as
* {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
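
The new overload lets callers that only hold a parameters map (for example, raw table or partition parameters read straight from the metastore) perform the same transactional check without first constructing a Table object, and it is null-safe. A small illustrative snippet, with the parameter values assumed:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.txn.TxnUtils;

    public class TransactionalCheckSketch {
      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        // hive_metastoreConstants.TABLE_IS_TRANSACTIONAL resolves to "transactional"
        params.put("transactional", "true");
        System.out.println(TxnUtils.isTransactionalTable(params));                      // true
        System.out.println(TxnUtils.isTransactionalTable((Map<String, String>) null));  // false
      }
    }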
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/package.jdo b/standalone-metastore/src/main/resources/package.jdo
index 1be3e98..3997f53 100644
--- a/standalone-metastore/src/main/resources/package.jdo
+++ b/standalone-metastore/src/main/resources/package.jdo
@@ -210,6 +210,12 @@
<field name="tableType">
<column name="TBL_TYPE" length="128" jdbc-type="VARCHAR"/>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
+ <field name="writeIdList">
+ <column name="WRITEID_LIST" jdbc-type="CLOB" allows-null="true"/>
+ </field>
</class>
<class name="MCreationMetadata" identity-type="datastore" table="MV_CREATION_METADATA" detachable="true">
@@ -489,6 +495,12 @@
<column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
</value>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
+ <field name="writeIdList">
+ <column name="WRITEID_LIST" jdbc-type="CLOB" allows-null="true"/>
+ </field>
</class>
<class name="MIndex" table="IDXS" identity-type="datastore" detachable="true">
@@ -989,6 +1001,9 @@
<field name="lastAnalyzed">
<column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
</class>
<class name="MPartitionColumnStatistics" table="PART_COL_STATS" identity-type="datastore" detachable="true">
@@ -1059,6 +1074,9 @@
<field name="lastAnalyzed">
<column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
</field>
+ <field name="txnId">
+ <column name="TXN_ID"/>
+ </field>
</class>
<class name="MVersionTable" table="VERSION" identity-type="datastore" detachable="true">
<datastore-identity>
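
Each <field> element added above binds a column to a like-named Java field on the corresponding model class (MTable, MPartition, MTableColumnStatistics, MPartitionColumnStatistics). The assumed shape of the model-side change, sketched here for MTable only and with conventional accessor names rather than code taken verbatim from the patch:

    // Assumed shape of the fields backing the new JDO mappings; names mirror the <field> elements.
    public class MTableSketch {
      private long txnId;           // maps to TBLS.TXN_ID
      private String writeIdList;   // maps to TBLS.WRITEID_LIST

      public long getTxnId() { return txnId; }
      public void setTxnId(long txnId) { this.txnId = txnId; }
      public String getWriteIdList() { return writeIdList; }
      public void setWriteIdList(String writeIdList) { this.writeIdList = writeIdList; }
    }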
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
index e818e1b..280fd4a 100644
--- a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
@@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT
CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
@@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "
CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
@@ -106,7 +106,8 @@ CREATE TABLE "APP"."TAB_COL_STATS"(
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
"TBL_ID" BIGINT NOT NULL,
- "BIT_VECTOR" BLOB
+ "BIT_VECTOR" BLOB,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
@@ -155,7 +156,8 @@ CREATE TABLE "APP"."PART_COL_STATS"(
"NUM_FALSES" BIGINT,
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
- "PART_ID" BIGINT NOT NULL
+ "PART_ID" BIGINT NOT NULL,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
@@ -373,7 +375,6 @@ ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK
ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
-
-- foreign
ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index bb69105..f92f13c 100644
--- a/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -47,7 +47,7 @@ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT
CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
-CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
@@ -75,7 +75,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "
CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "TXN_ID" BIGINT DEFAULT 0, "WRITEID_LIST" CLOB);
CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
@@ -106,7 +106,8 @@ CREATE TABLE "APP"."TAB_COL_STATS"(
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
"TBL_ID" BIGINT NOT NULL,
- "BIT_VECTOR" BLOB
+ "BIT_VECTOR" BLOB,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
@@ -155,7 +156,8 @@ CREATE TABLE "APP"."PART_COL_STATS"(
"NUM_FALSES" BIGINT,
"LAST_ANALYZED" BIGINT,
"CS_ID" BIGINT NOT NULL,
- "PART_ID" BIGINT NOT NULL
+ "PART_ID" BIGINT NOT NULL,
+ "TXN_ID" BIGINT DEFAULT 0
);
CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
index a511376..94f8192 100644
--- a/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
@@ -1,5 +1,11 @@
-- Upgrade MetaStore schema from 3.1.0 to 4.0.0
-
+-- HIVE-19416
+ALTER TABLE "APP"."TBLS" ADD WRITEID_LIST CLOB;
+ALTER TABLE "APP"."TBLS" ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE "APP"."PARTITIONS" ADD WRITEID_LIST CLOB;
+ALTER TABLE "APP"."PARTITIONS" ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE "APP"."TAB_COL_STATS" ADD TXN_ID bigint DEFAULT 0;
+ALTER TABLE "APP"."PART_COL_STATS" ADD TXN_ID bigint DEFAULT 0;
-- This needs to be the last thing done. Insert any changes above this line.
UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
index c88fb18..f20f910 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
@@ -94,7 +94,8 @@ CREATE TABLE PART_COL_STATS
PART_ID bigint NULL,
PARTITION_NAME nvarchar(767) NOT NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -144,7 +145,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME int NOT NULL,
PART_NAME nvarchar(767) NULL,
SD_ID bigint NULL,
- TBL_ID bigint NULL
+ TBL_ID bigint NULL,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -238,7 +241,8 @@ CREATE TABLE TAB_COL_STATS
NUM_TRUES bigint NULL,
TBL_ID bigint NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -369,7 +373,9 @@ CREATE TABLE TBLS
TBL_TYPE nvarchar(128) NULL,
VIEW_EXPANDED_TEXT text NULL,
VIEW_ORIGINAL_TEXT text NULL,
- IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
+ IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 922e8fe..22637c5 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -94,7 +94,8 @@ CREATE TABLE PART_COL_STATS
PART_ID bigint NULL,
PARTITION_NAME nvarchar(767) NOT NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -145,7 +146,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME int NOT NULL,
PART_NAME nvarchar(767) NULL,
SD_ID bigint NULL,
- TBL_ID bigint NULL
+ TBL_ID bigint NULL,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -242,7 +245,8 @@ CREATE TABLE TAB_COL_STATS
NUM_TRUES bigint NULL,
TBL_ID bigint NULL,
"TABLE_NAME" nvarchar(256) NOT NULL,
- "CAT_NAME" nvarchar(256) NOT NULL
+ "CAT_NAME" nvarchar(256) NOT NULL,
+ TXN_ID bigint NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
@@ -377,7 +381,9 @@ CREATE TABLE TBLS
TBL_TYPE nvarchar(128) NULL,
VIEW_EXPANDED_TEXT text NULL,
VIEW_ORIGINAL_TEXT text NULL,
- IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
+ IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
+ TXN_ID bigint NULL,
+ WRITEID_LIST text NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
index 27b7026..f0cbf6c 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
@@ -1,5 +1,13 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+-- HIVE-19416
+ALTER TABLE TBLS ADD WRITEID_LIST text NULL;
+ALTER TABLE TBLS ADD TXN_ID bigint NULL;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST text NULL;
+ALTER TABLE PARTITIONS ADD TXN_ID bigint NULL;
+ALTER TABLE TAB_COL_STATS ADD TXN_ID bigint NULL;
+ALTER TABLE PART_COL_STATS ADD TXN_ID bigint NULL;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
index c54df55..6e34ab5 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
@@ -222,6 +222,8 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
`PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
`SD_ID` bigint(20) DEFAULT NULL,
`TBL_ID` bigint(20) DEFAULT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`PART_ID`),
UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
KEY `PARTITIONS_N49` (`TBL_ID`),
@@ -625,6 +627,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
`VIEW_EXPANDED_TEXT` mediumtext,
`VIEW_ORIGINAL_TEXT` mediumtext,
`IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`TBL_ID`),
UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
KEY `TBLS_N50` (`SD_ID`),
@@ -720,6 +724,7 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -750,6 +755,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 6c40e6e..f8f229d 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -224,6 +224,8 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
`PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
`SD_ID` bigint(20) DEFAULT NULL,
`TBL_ID` bigint(20) DEFAULT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`PART_ID`),
UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
KEY `PARTITIONS_N49` (`TBL_ID`),
@@ -629,6 +631,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
`VIEW_EXPANDED_TEXT` mediumtext,
`VIEW_ORIGINAL_TEXT` mediumtext,
`IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
+ `TXN_ID` bigint(20) DEFAULT 0,
+ `WRITEID_LIST` text DEFAULT NULL,
PRIMARY KEY (`TBL_ID`),
UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
KEY `TBLS_N50` (`SD_ID`),
@@ -726,6 +730,7 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -757,6 +762,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
`NUM_TRUES` bigint(20),
`NUM_FALSES` bigint(20),
`LAST_ANALYZED` bigint(20) NOT NULL,
+ `TXN_ID` bigint(20) DEFAULT 0,
PRIMARY KEY (`CS_ID`),
CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
index 9b87563..5877c93 100644
--- a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
@@ -323,4 +323,4 @@ ALTER TABLE TXN_COMPONENTS MODIFY COLUMN TC_TABLE varchar(128) NULL;
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
-ALTER TABLE `TBLS` ADD COLUMN `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
\ No newline at end of file
+ALTER TABLE `TBLS` ADD COLUMN `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
index b3789f9..4ca584c 100644
--- a/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
@@ -1,5 +1,13 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+-- HIVE-19416
+ALTER TABLE TBLS ADD TXN_ID bigint;
+ALTER TABLE TBLS ADD WRITEID_LIST text;
+ALTER TABLE PARTITIONS ADD TXN_ID bigint;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST text;
+ALTER TABLE TAB_COL_STATS ADD TXN_ID bigint;
+ALTER TABLE PART_COL_STATS ADD TXN_ID bigint;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
index 63cc1f7..abdb984 100644
--- a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
@@ -162,7 +162,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME NUMBER (10) NOT NULL,
PART_NAME VARCHAR2(767) NULL,
SD_ID NUMBER NULL,
- TBL_ID NUMBER NULL
+ TBL_ID NUMBER NULL,
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -392,7 +394,9 @@ CREATE TABLE TBLS
TBL_TYPE VARCHAR2(128) NULL,
VIEW_EXPANDED_TEXT CLOB NULL,
VIEW_ORIGINAL_TEXT CLOB NULL,
- IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
+ IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -525,7 +529,8 @@ CREATE TABLE TAB_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
CREATE TABLE VERSION (
@@ -563,7 +568,8 @@ CREATE TABLE PART_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
@@ -1134,7 +1140,6 @@ CREATE TABLE RUNTIME_STATS (
CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
-
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index e12150a..a143fd2 100644
--- a/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -163,7 +163,9 @@ CREATE TABLE PARTITIONS
LAST_ACCESS_TIME NUMBER (10) NOT NULL,
PART_NAME VARCHAR2(767) NULL,
SD_ID NUMBER NULL,
- TBL_ID NUMBER NULL
+ TBL_ID NUMBER NULL,
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
@@ -398,7 +400,9 @@ CREATE TABLE TBLS
TBL_TYPE VARCHAR2(128) NULL,
VIEW_EXPANDED_TEXT CLOB NULL,
VIEW_ORIGINAL_TEXT CLOB NULL,
- IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
+ IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
+ TXN_ID NUMBER NULL,
+ WRITEID_LIST CLOB NULL
);
ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -531,7 +535,8 @@ CREATE TABLE TAB_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
@@ -571,7 +576,8 @@ CREATE TABLE PART_COL_STATS (
MAX_COL_LEN NUMBER,
NUM_TRUES NUMBER,
NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
+ LAST_ANALYZED NUMBER NOT NULL,
+ TXN_ID NUMBER NULL
);
ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index ce3437f..5b767bc 100644
--- a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -339,4 +339,4 @@ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_WRITEID = CTC_TXNID;
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
-ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
\ No newline at end of file
+ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 6fa5e2d..7ac4d40 100644
--- a/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@ -1,5 +1,12 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
+ALTER TABLE TBLS ADD TXN_ID number NULL;
+ALTER TABLE TBLS ADD WRITEID_LIST CLOB NULL;
+ALTER TABLE PARTITIONS ADD TXN_ID number NULL;
+ALTER TABLE PARTITIONS ADD WRITEID_LIST CLOB NULL;
+ALTER TABLE TAB_COL_STATS ADD TXN_ID number NULL;
+ALTER TABLE PART_COL_STATS ADD TXN_ID number NULL;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
index 97697f8..449f295 100644
--- a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
@@ -166,7 +166,9 @@ CREATE TABLE "PARTITIONS" (
"LAST_ACCESS_TIME" bigint NOT NULL,
"PART_NAME" character varying(767) DEFAULT NULL::character varying,
"SD_ID" bigint,
- "TBL_ID" bigint
+ "TBL_ID" bigint,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
@@ -388,7 +390,9 @@ CREATE TABLE "TBLS" (
"TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
"VIEW_EXPANDED_TEXT" text,
"VIEW_ORIGINAL_TEXT" text,
- "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
+ "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
--
@@ -539,7 +543,8 @@ CREATE TABLE "TAB_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
@@ -577,7 +582,8 @@ CREATE TABLE "PART_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
@@ -1074,6 +1080,8 @@ ALTER TABLE ONLY "WM_MAPPING"
ALTER TABLE ONLY "WM_MAPPING"
ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+-- Transactional table stats PK constraints
+
--
-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
--
@@ -1618,6 +1626,8 @@ ALTER TABLE ONLY "MV_TABLES_USED"
ALTER TABLE ONLY "MV_TABLES_USED"
ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
+-- Transactional table stats FK constraints
+
--
-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
--
@@ -1822,7 +1832,6 @@ CREATE TABLE RUNTIME_STATS (
CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
-
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index b73e1d1..0ead590 100644
--- a/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -168,7 +168,9 @@ CREATE TABLE "PARTITIONS" (
"LAST_ACCESS_TIME" bigint NOT NULL,
"PART_NAME" character varying(767) DEFAULT NULL::character varying,
"SD_ID" bigint,
- "TBL_ID" bigint
+ "TBL_ID" bigint,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
@@ -392,7 +394,9 @@ CREATE TABLE "TBLS" (
"TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
"VIEW_EXPANDED_TEXT" text,
"VIEW_ORIGINAL_TEXT" text,
- "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
+ "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
+ "TXN_ID" bigint,
+ "WRITEID_LIST" text
);
--
@@ -545,7 +549,8 @@ CREATE TABLE "TAB_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
@@ -583,7 +588,8 @@ CREATE TABLE "PART_COL_STATS" (
"MAX_COL_LEN" bigint,
"NUM_TRUES" bigint,
"NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
+ "LAST_ANALYZED" bigint NOT NULL,
+ "TXN_ID" bigint
);
--
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index 40d2795..f2bae02 100644
--- a/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@ -1,5 +1,13 @@
SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
+-- HIVE-19416
+ALTER TABLE "TBLS" ADD "TXN_ID" bigint;
+ALTER TABLE "TBLS" ADD "WRITEID_LIST" text;
+ALTER TABLE "PARTITIONS" ADD "TXN_ID" bigint;
+ALTER TABLE "PARTITIONS" ADD "WRITEID_LIST" text;
+ALTER TABLE "TAB_COL_STATS" ADD "TXN_ID" bigint;
+ALTER TABLE "PART_COL_STATS" ADD "TXN_ID" bigint;
+
-- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0';
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 6e503eb..ecd2001 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -233,6 +233,12 @@ enum SchemaVersionState {
DELETED = 8
}
+enum IsolationLevelCompliance {
+ YES = 1,
+ NO = 2,
+ UNKNOWN = 3
+}
+
struct HiveObjectRef{
1: HiveObjectType objectType,
2: string dbName,
@@ -430,7 +436,10 @@ struct Table {
15: optional bool rewriteEnabled, // rewrite enabled or not
16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation
17: optional string catName, // Name of the catalog the table is in
- 18: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility)
+ 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
+ 19: optional i64 txnId=-1,
+ 20: optional string validWriteIdList,
+ 21: optional IsolationLevelCompliance isStatsCompliant
}
struct Partition {
@@ -442,7 +451,10 @@ struct Partition {
6: StorageDescriptor sd,
7: map<string, string> parameters,
8: optional PrincipalPrivilegeSet privileges,
- 9: optional string catName
+ 9: optional string catName,
+ 10: optional i64 txnId=-1,
+ 11: optional string validWriteIdList,
+ 12: optional IsolationLevelCompliance isStatsCompliant
}
struct PartitionWithoutSD {
@@ -469,7 +481,10 @@ struct PartitionSpec {
3: string rootPath,
4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
5: optional PartitionListComposingSpec partitionList,
- 6: optional string catName
+ 6: optional string catName,
+ 7: optional i64 txnId=-1,
+ 8: optional string validWriteIdList,
+ 9: optional IsolationLevelCompliance isStatsCompliant
}
// column statistics
@@ -564,17 +579,24 @@ struct ColumnStatisticsDesc {
struct ColumnStatistics {
1: required ColumnStatisticsDesc statsDesc,
-2: required list<ColumnStatisticsObj> statsObj;
+2: required list<ColumnStatisticsObj> statsObj,
+3: optional i64 txnId=-1, // transaction id of the query that sends this structure
+4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
+5: optional IsolationLevelCompliance isStatsCompliant // Are the stats isolation-level-compliant with the
+ // calling query?
}
struct AggrStats {
1: required list<ColumnStatisticsObj> colStats,
-2: required i64 partsFound // number of partitions for which stats were found
+2: required i64 partsFound, // number of partitions for which stats were found
+3: optional IsolationLevelCompliance isStatsCompliant
}
struct SetPartitionsStatsRequest {
1: required list<ColumnStatistics> colStats,
-2: optional bool needMerge //stats need to be merged with the existing stats
+2: optional bool needMerge, //stats need to be merged with the existing stats
+3: optional i64 txnId=-1, // transaction id of the query that sends this structure
+4: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
}
// schema of the table/query results etc.
@@ -703,18 +725,22 @@ struct PartitionsByExprRequest {
}
struct TableStatsResult {
- 1: required list<ColumnStatisticsObj> tableStats
+ 1: required list<ColumnStatisticsObj> tableStats,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
struct PartitionsStatsResult {
- 1: required map<string, list<ColumnStatisticsObj>> partStats
+ 1: required map<string, list<ColumnStatisticsObj>> partStats,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
struct TableStatsRequest {
1: required string dbName,
2: required string tblName,
3: required list<string> colNames
- 4: optional string catName
+ 4: optional string catName,
+ 5: optional i64 txnId=-1, // transaction id of the query that sends this structure
+ 6: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
}
struct PartitionsStatsRequest {
@@ -722,12 +748,15 @@ struct PartitionsStatsRequest {
2: required string tblName,
3: required list<string> colNames,
4: required list<string> partNames,
- 5: optional string catName
+ 5: optional string catName,
+ 6: optional i64 txnId=-1, // transaction id of the query that sends this structure
+ 7: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
}
// Return type for add_partitions_req
struct AddPartitionsResult {
1: optional list<Partition> partitions,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
// Request type for add_partitions_req
@@ -737,7 +766,9 @@ struct AddPartitionsRequest {
3: required list<Partition> parts,
4: required bool ifNotExists,
5: optional bool needResult=true,
- 6: optional string catName
+ 6: optional string catName,
+ 7: optional i64 txnId=-1,
+ 8: optional string validWriteIdList
}
// Return type for drop_partitions_req
@@ -1209,11 +1240,14 @@ struct GetTableRequest {
1: required string dbName,
2: required string tblName,
3: optional ClientCapabilities capabilities,
- 4: optional string catName
+ 4: optional string catName,
+ 5: optional i64 txnId=-1,
+ 6: optional string validWriteIdList
}
struct GetTableResult {
- 1: required Table table
+ 1: required Table table,
+ 2: optional IsolationLevelCompliance isStatsCompliant
}
struct GetTablesRequest {
@@ -1544,6 +1578,18 @@ struct GetRuntimeStatsRequest {
2: required i32 maxCreateTime
}
+struct AlterPartitionsRequest {
+ 1: required string dbName,
+ 2: required string tableName,
+ 3: required list<Partition> partitions,
+ 4: required EnvironmentContext environmentContext,
+ 5: optional i64 txnId=-1,
+ 6: optional string validWriteIdList
+}
+
+struct AlterPartitionsResponse {
+}
+
// Exceptions.
exception MetaException {
@@ -1874,7 +1920,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
// prehooks are fired together followed by all post hooks
void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
throws (1:InvalidOperationException o1, 2:MetaException o2)
- void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+ AlterPartitionsResponse alter_partitions_with_environment_context(1:AlterPartitionsRequest req)
+ throws (1:InvalidOperationException o1, 2:MetaException o2)
void alter_partition_with_environment_context(1:string db_name,
2:string tbl_name, 3:Partition new_part,
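
On the client side the new optional members are ordinary Thrift struct fields, so a caller that wants isolation-consistent stats sets the transaction id and valid write id list on the request and checks isStatsCompliant on the result. A sketch against the generated Java classes; the setter and getter names follow the usual Thrift naming conventions, and the database, table, and id values are placeholders:

    import org.apache.hadoop.hive.metastore.api.GetTableRequest;
    import org.apache.hadoop.hive.metastore.api.GetTableResult;
    import org.apache.hadoop.hive.metastore.api.IsolationLevelCompliance;

    public class GetTableRequestSketch {
      static GetTableRequest buildRequest(long txnId, String validWriteIdList) {
        GetTableRequest req = new GetTableRequest("default", "acid_tbl");
        req.setTxnId(txnId);                        // transaction id of the calling query
        req.setValidWriteIdList(validWriteIdList);  // e.g. serialized valid write id list for the table
        return req;
      }

      static boolean statsUsable(GetTableResult result) {
        // Stats are trustworthy only when the server marked them compliant with the caller's snapshot.
        return result.isSetIsStatsCompliant()
            && result.getIsStatsCompliant() == IsolationLevelCompliance.YES;
      }
    }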
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 7c7429d..6985736 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -19,11 +19,7 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -33,58 +29,6 @@ import java.util.Map;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
import org.apache.thrift.TException;
@@ -247,6 +191,12 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public Table getTable(String catName, String dbName, String tableName, long txnId, String writeIdList)
+ throws MetaException {
+ return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+ }
+
+ @Override
public boolean addPartition(Partition part)
throws InvalidObjectException, MetaException {
return objectStore.addPartition(part);
@@ -259,6 +209,13 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public Partition getPartition(String catName, String dbName, String tableName,
+ List<String> partVals, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+ }
+
+ @Override
public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
throws MetaException, NoSuchObjectException,
InvalidObjectException, InvalidInputException {
@@ -343,9 +300,11 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbName, String tblName,
- List<List<String>> partValsList, List<Partition> newParts)
+ List<List<String>> partValsList, List<Partition> newParts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
- objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
+ objectStore.alterPartitions(
+ catName, dbName, tblName, partValsList, newParts, txnId, writeIdList);
}
@Override
@@ -647,6 +606,15 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
+ String tableName, List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getTableColumnStatistics(
+ catName, dbName, tableName, colNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
@@ -739,6 +707,15 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionColumnStatistics(
+ catName, dbName, tblName , colNames, partNames, txnId, writeIdList);
+ }
+
+ @Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
List<FieldSchema> partKeys, List<String> partVals)
throws MetaException, NoSuchObjectException {
@@ -807,6 +784,15 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
}
@Override
+ public AggrStats get_aggr_stats_for(String catName, String dbName,
+ String tblName, List<String> partNames,
+ List<String> colNames,
+ long txnId, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
return objectStore.getNextNotification(rqst);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index e4f2a17..37e9920 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -19,11 +19,7 @@
package org.apache.hadoop.hive.metastore;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.*;
import java.nio.ByteBuffer;
import java.util.Collections;
@@ -31,58 +27,6 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
-import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMNullablePool;
-import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.RuntimeStat;
-import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersion;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMMapping;
-import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -243,6 +187,12 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public Table getTable(String catalogName, String dbName, String tableName,
+ long txnid, String writeIdList) throws MetaException {
+ return null;
+ }
+
+ @Override
public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
return false;
@@ -256,6 +206,13 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals,
+ long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public boolean dropPartition(String catName, String dbName, String tableName, List<String> part_vals)
throws MetaException {
@@ -344,10 +301,10 @@ public class DummyRawStoreForJdoConnection implements RawStore {
@Override
public void alterPartitions(String catName, String db_name, String tbl_name,
- List<List<String>> part_vals_list, List<Partition> new_parts)
+ List<List<String>> part_vals_list, List<Partition> new_parts,
+ long txnId, String writeIdList)
throws InvalidObjectException, MetaException {
-
}
@Override
@@ -700,6 +657,14 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public ColumnStatistics getTableColumnStatistics(
+ String catName, String dbName, String tableName, List<String> colName,
+ long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException {
@@ -749,6 +714,14 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return Collections.emptyList();
+ }
+
+ @Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
List<FieldSchema> partKeys, List<String> partVals)
throws MetaException, NoSuchObjectException {
@@ -812,6 +785,14 @@ public class DummyRawStoreForJdoConnection implements RawStore {
}
@Override
+ public AggrStats get_aggr_stats_for(
+ String catName, String dbName, String tblName, List<String> partNames,
+ List<String> colNames, long txnid, String writeIdList)
+ throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
return null;
}
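For orientation, the stubs above track new write-id-aware RawStore methods that take a transaction id plus a serialized per-table writeIdList. A minimal glue sketch, not part of the patch (the helper and variable names are invented, while ValidTxnWriteIdList/ValidWriteIdList are the storage-api classes touched later in this change), of how such a per-table string could be derived from a query-level write id list:

import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public class WriteIdListGlueSketch {
  // Illustrative only: derive the per-table writeIdList string that the new
  // RawStore and statistics overloads above accept.
  static String tableWriteIdList(String txnWriteIdListStr, String fullTableName) {
    ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList(txnWriteIdListStr);
    ValidWriteIdList tableWriteIds = txnWriteIds.getTableValidWriteIdList(fullTableName);
    return tableWriteIds == null ? null : tableWriteIds.writeToString();
  }
}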
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 2d87a2f..3899f03 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore;
import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
import java.io.IOException;
import java.lang.reflect.Constructor;
@@ -1429,6 +1430,17 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
return fastpath ? t : deepCopy(filterHook.filterTable(t));
}
+ @Override
+ public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList)
+ throws MetaException, TException, NoSuchObjectException {
+ GetTableRequest req = new GetTableRequest(dbName, tableName);
+ req.setCapabilities(version);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(validWriteIdList);
+ Table t = client.get_table_req(req).getTable();
+ return fastpath ? t : deepCopy(filterHook.filterTable(t));
+ }
+
/** {@inheritDoc} */
@Override
public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
@@ -1612,13 +1624,33 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
throws InvalidOperationException, MetaException, TException {
- client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
+ client.alter_partitions(dbName, tblName, newParts);
}
@Override
public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
throws InvalidOperationException, MetaException, TException {
- client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
+ AlterPartitionsRequest req = new AlterPartitionsRequest();
+ req.setDbName(dbName);
+ req.setTableName(tblName);
+ req.setPartitions(newParts);
+ req.setEnvironmentContext(environmentContext);
+ client.alter_partitions_with_environment_context(req);
+ }
+
+ @Override
+ public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList)
+ throws InvalidOperationException, MetaException, TException {
+ AlterPartitionsRequest req = new AlterPartitionsRequest();
+ req.setDbName(dbName);
+ req.setTableName(tblName);
+ req.setPartitions(newParts);
+ req.setEnvironmentContext(environmentContext);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(writeIdList);
+ client.alter_partitions_with_environment_context(req);
}
@Override
@@ -1727,6 +1759,17 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
}
+ @Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(
+ String dbName, String tableName, List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
+ tsr.setTxnId(txnId);
+ tsr.setValidWriteIdList(validWriteIdList);
+
+ return client.get_table_statistics_req(tsr).getTableStats();
+ }
+
/** {@inheritDoc} */
@Override
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
@@ -1736,6 +1779,18 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
}
+ @Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String dbName, String tableName, List<String> partNames,
+ List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames);
+ psr.setTxnId(txnId);
+ psr.setValidWriteIdList(validWriteIdList);
+ return client.get_partitions_statistics_req(psr).getPartStats();
+ }
+
/** {@inheritDoc} */
@Override
public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
@@ -2593,6 +2648,21 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public AggrStats getAggrColStatsFor(
+ String dbName, String tblName, List<String> colNames,
+ List<String> partName, long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ if (colNames.isEmpty() || partName.isEmpty()) {
+ LOG.debug("colNames or partNames is empty: short-circuiting stats eval on the client side.");
+ return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate
+ }
+ PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName);
+ req.setTxnId(txnId);
+ req.setValidWriteIdList(writeIdList);
+ return client.get_aggr_stats_for(req);
+ }
+
+ @Override
public Iterable<Entry<Long, ByteBuffer>> getFileMetadata(
final List<Long> fileIds) throws TException {
return new MetastoreMapIterable<Long, ByteBuffer>() {
@@ -3000,6 +3070,12 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public Table getTable(String catName, String dbName, String tableName,
+ long txnId, String validWriteIdList) throws TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public List<Table> getTableObjectsByName(String catName, String dbName,
List<String> tableNames) throws MetaException,
InvalidOperationException, UnknownDBException, TException {
@@ -3226,7 +3302,8 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
@Override
public void alter_partitions(String catName, String dbName, String tblName,
List<Partition> newParts,
- EnvironmentContext environmentContext) throws
+ EnvironmentContext environmentContext,
+ long txnId, String writeIdList) throws
InvalidOperationException, MetaException, TException {
throw new UnsupportedOperationException();
}
@@ -3259,6 +3336,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public List<ColumnStatisticsObj> getTableColumnStatistics(
+ String catName, String dbName, String tableName, List<String> colNames,
+ long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String catName,
String dbName,
String tableName,
@@ -3269,6 +3354,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+ String catName, String dbName, String tableName, List<String> partNames,
+ List<String> colNames, long txnId, String validWriteIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
String partName, String colName) throws
NoSuchObjectException, MetaException, InvalidObjectException, TException,
@@ -3316,6 +3409,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
}
@Override
+ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
+ List<String> colNames, List<String> partNames,
+ long txnId, String writeIdList)
+ throws NoSuchObjectException, MetaException, TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public void dropConstraint(String catName, String dbName, String tableName,
String constraintName) throws MetaException, NoSuchObjectException,
TException {
@@ -3420,4 +3521,5 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
public List<RuntimeStat> getRuntimeStats(int maxWeight, int maxCreateTime) throws TException {
throw new UnsupportedOperationException();
}
+
}
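For context, a minimal caller-side sketch of the write-id-aware statistics overload added above (illustrative only: the database, table and column names are invented, and the write id list is assumed to come from the caller's transaction state):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.thrift.TException;

public class WriteIdAwareStatsSketch {
  // Illustrative only: fetch table column statistics consistent with the
  // snapshot described by an open transaction id and its valid write id list.
  static List<ColumnStatisticsObj> statsForSnapshot(
      IMetaStoreClient msc, long txnId, ValidWriteIdList writeIds) throws TException {
    return msc.getTableColumnStatistics(
        "db1", "tbl1", Arrays.asList("col1"), txnId, writeIds.writeToString());
  }
}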
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 54bf3d7..f19b505 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -835,7 +835,8 @@ public class TestAlterPartitions extends MetaStoreClientTest {
public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception {
createTable4PartColsParts(client);
Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
- client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+ client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
+ -1, null);
}
@Test(expected = InvalidOperationException.class)
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
index 9867a81..cfe01fe 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidTxnWriteIdList.java
@@ -63,6 +63,10 @@ public class ValidTxnWriteIdList {
return null;
}
+ public boolean isEmpty() {
+ return tablesValidWriteIdList.isEmpty();
+ }
+
// Each ValidWriteIdList is separated with "$" and each one maps to one table
// Format <txnId>$<table_name>:<hwm>:<minOpenWriteId>:<open_writeids>:<abort_writeids>$<table_name>...
private void readFromString(String src) {
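As a rough illustration of the serialized form described in the comment above, a two-table list and the new isEmpty() check might look like this (sketch only; the ids are made up and the existing String constructor is assumed):

import org.apache.hadoop.hive.common.ValidTxnWriteIdList;

public class TxnWriteIdListFormatSketch {
  public static void main(String[] args) {
    // Hypothetical values; format per the comment above:
    //   <txnId>$<table>:<hwm>:<minOpenWriteId>:<open_writeids>:<abort_writeids>$...
    String src = "21$db1.t1:10:9223372036854775807::$db1.t2:7:9223372036854775807::";
    ValidTxnWriteIdList txnWriteIds = new ValidTxnWriteIdList(src);
    System.out.println(!txnWriteIds.isEmpty()); // true: two per-table entries were parsed
  }
}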
http://git-wip-us.apache.org/repos/asf/hive/blob/1d46608e/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
index 17f3777..dc50f1b 100644
--- a/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
+++ b/storage-api/src/java/org/apache/hive/common/util/TxnIdUtils.java
@@ -36,8 +36,24 @@ public class TxnIdUtils {
}
return checkEquivalentCommittedIds(
- older.getHighWatermark(), older.getInvalidWriteIds(),
- newer.getHighWatermark(), newer.getInvalidWriteIds());
+ older.getHighWatermark(), older.getInvalidWriteIds(),
+ newer.getHighWatermark(), newer.getInvalidWriteIds());
+ }
+
+ /**
+ * Check whether the given two write id lists are for concurrent writes
+ * on the table.
+ */
+ public static boolean areTheseConcurrentWrites(
+ ValidWriteIdList older, ValidWriteIdList newer, long statsWriteId) {
+ if (!older.getTableName().equalsIgnoreCase(newer.getTableName())) {
+ return false;
+ }
+
+ assert(older.getHighWatermark() <= newer.getHighWatermark());
+
+ // TODO: Just return false for now.
+ return false;
}
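A small usage sketch of the new helper as it stands in this change (the write id list literals are invented; ValidReaderWriteIdList is assumed as the storage-api implementation of ValidWriteIdList). Mismatched table names short-circuit to false, and with the TODO above unresolved every other input currently returns false as well:

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hive.common.util.TxnIdUtils;

public class ConcurrentWritesSketch {
  public static void main(String[] args) {
    // Hypothetical per-table write id lists for the same table at two points in time.
    ValidWriteIdList older = new ValidReaderWriteIdList("db1.t1:5:9223372036854775807::");
    ValidWriteIdList newer = new ValidReaderWriteIdList("db1.t1:8:9223372036854775807::");
    // Currently always false per the TODO above.
    System.out.println(TxnIdUtils.areTheseConcurrentWrites(older, newer, 6L));
  }
}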
/**