Posted to commits@hive.apache.org by om...@apache.org on 2017/07/20 20:09:56 UTC
[38/43] hive git commit: HIVE-16787 Fix itests in branch-2.2
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/druid_intervals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out
index 28a5d41..a1c2d62 100644
--- a/ql/src/test/results/clientpositive/druid_intervals.q.out
+++ b/ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -78,14 +78,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -113,14 +105,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/2012-03-01T00:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -148,14 +132,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2012-03-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-03-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -185,14 +161,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2011-01-01T00:00:00.000Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -220,14 +188,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -257,14 +217,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z","2012-01-01T00:00:00.000Z/2013-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001","2012-01-01T00:00:00.000/2013-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -294,14 +246,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2012-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -329,14 +273,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2010-01-01T00:00:00.001Z","2011-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp)
@@ -364,14 +300,6 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000Z/2010-01-01T00:00:00.001Z","2011-01-01T00:00:00.000Z/2011-01-01T00:00:00.001Z"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
expressions: __time (type: timestamp), 'user1' (type: string)
@@ -400,14 +328,6 @@ STAGE PLANS:
TableScan
alias: druid_table_1
filterExpr: ((__time) IN ('2010-01-01 00:00:00', '2011-01-01 00:00:00') or (robot = 'user1')) (type: boolean)
- properties:
-<<<<<<< HEAD
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
- druid.query.type SELECT
-=======
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
->>>>>>> 539426a425... HIVE-13316: Upgrade to Calcite 1.10 (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: ((__time) IN ('2010-01-01 00:00:00', '2011-01-01 00:00:00') or (robot = 'user1')) (type: boolean)
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/druid_timeseries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index 4ecf67f..95a4c50 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -78,12 +78,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"doubleMax","name":"$f0","fieldName":"added"},{"type":"doubleSum","name":"$f1","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: $f0 (type: float), $f1 (type: float)
+ expressions: $f0 (type: bigint), $f1 (type: float)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -108,12 +105,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"none","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -138,12 +132,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"year","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -168,12 +159,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"quarter","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -198,12 +186,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"month","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -228,12 +213,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"week","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -258,12 +240,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"day","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -288,12 +267,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"hour","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -318,12 +294,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"minute","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -348,12 +321,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"second","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -380,12 +350,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"hour","filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+ expressions: __time (type: timestamp), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -417,9 +384,6 @@ STAGE PLANS:
TableScan
alias: druid_table_1
filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
- properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
@@ -494,9 +458,6 @@ STAGE PLANS:
TableScan
alias: druid_table_1
filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
- properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/druid_topn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
index 387c4c3..f2152e2 100644
--- a/ql/src/test/results/clientpositive/druid_topn.q.out
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -84,12 +84,9 @@ STAGE PLANS:
Processor Tree:
TableScan
alias: druid_table_1
- properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":["robot"],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f1","direction":"descending"}]},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
- druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
- expressions: robot (type: string), $f1 (type: float), $f2 (type: float)
+ expressions: robot (type: string), $f1 (type: bigint), $f2 (type: float)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
ListSink
@@ -109,54 +106,21 @@ ORDER BY s DESC
LIMIT 100
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: druid_table_1
- properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"none","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
- druid.query.type groupBy
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: __time (type: timestamp), robot (type: string), $f2 (type: float), $f3 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Reduce Output Operator
- key expressions: _col3 (type: float)
- sort order: -
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: float)
- Reduce Operator Tree:
- Select Operator
- expressions: VALUE._col0 (type: timestamp), VALUE._col1 (type: string), VALUE._col2 (type: float), KEY.reducesinkkey0 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Limit
- Number of rows: 100
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: _col1 (type: string), _col0 (type: timestamp), _col2 (type: float), _col3 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: druid_table_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ ListSink
PREHOOK: query: EXPLAIN
SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
@@ -173,50 +137,21 @@ ORDER BY s DESC
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: druid_table_1
- properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"year","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
- druid.query.type groupBy
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: robot (type: string), __time (type: timestamp), $f2 (type: float), $f3 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Reduce Output Operator
- key expressions: _col3 (type: float)
- sort order: -
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: timestamp), _col2 (type: float)
- Reduce Operator Tree:
- Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), VALUE._col2 (type: float), KEY.reducesinkkey0 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Limit
- Number of rows: 10
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: 10
+ limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: druid_table_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ ListSink
PREHOOK: query: EXPLAIN
SELECT robot, floor_month(`__time`), max(added), sum(variation) as s
@@ -233,50 +168,21 @@ ORDER BY s
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: druid_table_1
- properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"month","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
- druid.query.type groupBy
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: robot (type: string), __time (type: timestamp), $f2 (type: float), $f3 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Reduce Output Operator
- key expressions: _col3 (type: float)
- sort order: +
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: timestamp), _col2 (type: float)
- Reduce Operator Tree:
- Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), VALUE._col2 (type: float), KEY.reducesinkkey0 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Limit
- Number of rows: 10
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: 10
+ limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: druid_table_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: robot (type: string), __time (type: timestamp), $f2 (type: bigint), $f3 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ ListSink
PREHOOK: query: EXPLAIN
SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
@@ -293,54 +199,21 @@ ORDER BY s DESC, m DESC
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: druid_table_1
- properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"month","dimensions":["robot","namespace"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
- druid.query.type groupBy
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: robot (type: string), __time (type: timestamp), $f3 (type: float), $f4 (type: float)
- outputColumnNames: _col0, _col2, _col3, _col4
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Reduce Output Operator
- key expressions: _col4 (type: float), _col3 (type: float)
- sort order: --
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col2 (type: timestamp)
- Reduce Operator Tree:
- Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col2 (type: timestamp), KEY.reducesinkkey1 (type: float), KEY.reducesinkkey0 (type: float)
- outputColumnNames: _col0, _col2, _col3, _col4
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Limit
- Number of rows: 10
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col2 (type: timestamp), _col3 (type: float), _col4 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: druid_table_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: robot (type: string), __time (type: timestamp), $f3 (type: bigint), $f4 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ ListSink
PREHOOK: query: EXPLAIN
SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
@@ -357,54 +230,21 @@ ORDER BY robot ASC, m DESC
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: druid_table_1
- properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"month","dimensions":["robot","namespace"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
- druid.query.type groupBy
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: robot (type: string), __time (type: timestamp), $f3 (type: float), $f4 (type: float)
- outputColumnNames: _col0, _col2, _col3, _col4
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col3 (type: float)
- sort order: +-
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- TopN Hash Memory Usage: 0.1
- value expressions: _col2 (type: timestamp), _col4 (type: float)
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: float)
- outputColumnNames: _col0, _col2, _col3, _col4
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Limit
- Number of rows: 10
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col2 (type: timestamp), _col3 (type: float), _col4 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: druid_table_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: robot (type: string), __time (type: timestamp), $f3 (type: bigint), $f4 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ ListSink
PREHOOK: query: EXPLAIN
SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
@@ -423,54 +263,21 @@ ORDER BY s
LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: druid_table_1
- properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"year","filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"doubleMax","name":"$f1_0","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
- druid.query.type timeseries
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: __time (type: timestamp), $f1_0 (type: float), $f2 (type: float)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Reduce Output Operator
- key expressions: _col2 (type: float)
- sort order: +
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: timestamp), _col1 (type: float)
- Reduce Operator Tree:
- Select Operator
- expressions: VALUE._col0 (type: timestamp), VALUE._col1 (type: float), KEY.reducesinkkey0 (type: float)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Limit
- Number of rows: 10
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: '1' (type: string), _col0 (type: timestamp), _col1 (type: float), _col2 (type: float)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
- ListSink
+ TableScan
+ alias: druid_table_1
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator
+ expressions: '1' (type: string), __time (type: timestamp), $f1_0 (type: bigint), $f2 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ ListSink
PREHOOK: query: EXPLAIN
SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation)
@@ -504,9 +311,6 @@ STAGE PLANS:
TableScan
alias: druid_table_1
filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
- properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
- druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
predicate: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
index d1dc317..ca05dd4 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization_acid.q.out
@@ -78,27 +78,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 1725 Data size: 5177 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 862 Data size: 2586 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 862 Data size: 2586 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 862 Data size: 2586 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 862 Data size: 2586 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 862 Data size: 2586 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -156,28 +150,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 1945 Data size: 5835 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 972 Data size: 2916 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string)
outputColumnNames: _col0, _col3
- Statistics: Num rows: 972 Data size: 2916 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 972 Data size: 2916 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: string)
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), VALUE._col1 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 972 Data size: 2916 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 972 Data size: 2916 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -325,27 +313,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 1566 Data size: 4698 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 783 Data size: 2349 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 783 Data size: 2349 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 783 Data size: 2349 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 783 Data size: 2349 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 783 Data size: 2349 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -403,27 +385,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 1785 Data size: 5357 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 892 Data size: 2676 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string)
outputColumnNames: _col0, _col3
- Statistics: Num rows: 892 Data size: 2676 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col3 (type: string), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: +++
Map-reduce partition columns: _col3 (type: string)
- Statistics: Num rows: 892 Data size: 2676 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Select Operator
expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), KEY._col3 (type: string), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, '_bucket_number'
- Statistics: Num rows: 892 Data size: 2676 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 892 Data size: 2676 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -577,27 +553,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 1547 Data size: 4642 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 773 Data size: 2319 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 773 Data size: 2319 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 773 Data size: 2319 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 773 Data size: 2319 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 773 Data size: 2319 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -656,28 +626,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 3032 Data size: 9099 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 1516 Data size: 4549 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
outputColumnNames: _col0, _col4
- Statistics: Num rows: 1516 Data size: 4549 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 1516 Data size: 4549 Basic stats: COMPLETE Column stats: NONE
value expressions: _col4 (type: int)
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col2 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 1516 Data size: 4549 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1516 Data size: 4549 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -932,27 +896,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 1548 Data size: 4644 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 774 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 774 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 774 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 774 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 774 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1011,27 +969,21 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 3034 Data size: 9103 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 1517 Data size: 4551 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
outputColumnNames: _col0, _col4
- Statistics: Num rows: 1517 Data size: 4551 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: '2008-04-08' (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: ++++
Map-reduce partition columns: '2008-04-08' (type: string), _col4 (type: int)
- Statistics: Num rows: 1517 Data size: 4551 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Select Operator
expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
- Statistics: Num rows: 1517 Data size: 4551 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 1517 Data size: 4551 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1285,28 +1237,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 46 Data size: 4644 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 23 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), key (type: string), ds (type: string), hr (type: int)
outputColumnNames: _col0, _col1, _col3, _col4
- Statistics: Num rows: 23 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: ++++
Map-reduce partition columns: _col3 (type: string), _col4 (type: int)
- Statistics: Num rows: 23 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string), 'bar' (type: string)
Reduce Operator Tree:
Select Operator
expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
- Statistics: Num rows: 23 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 23 Data size: 2322 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1365,28 +1311,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 90 Data size: 9101 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 45 Data size: 4550 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), key (type: string), ds (type: string), hr (type: int)
outputColumnNames: _col0, _col1, _col3, _col4
- Statistics: Num rows: 45 Data size: 4550 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
sort order: ++++
Map-reduce partition columns: _col3 (type: string), _col4 (type: int)
- Statistics: Num rows: 45 Data size: 4550 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string), 'bar' (type: string)
Reduce Operator Tree:
Select Operator
expressions: KEY._col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
- Statistics: Num rows: 45 Data size: 4550 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 45 Data size: 4550 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
index 7a91538..4c62587 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
@@ -1,12 +1,6 @@
-PREHOOK: query: --SORT_QUERY_RESULTS
-
--- Java JCE must be installed in order to have a key length of 256 bits
-DROP TABLE IF EXISTS table_key_1 PURGE
+PREHOOK: query: DROP TABLE IF EXISTS table_key_1 PURGE
PREHOOK: type: DROPTABLE
-POSTHOOK: query: --SORT_QUERY_RESULTS
-
--- Java JCE must be installed in order to have a key length of 256 bits
-DROP TABLE IF EXISTS table_key_1 PURGE
+POSTHOOK: query: DROP TABLE IF EXISTS table_key_1 PURGE
POSTHOOK: type: DROPTABLE
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
index 5cea8f2..bdbb567 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
@@ -49,7 +49,7 @@ PREHOOK: query: ALTER TABLE default.encrypted_table RENAME TO encrypted_db.encry
PREHOOK: type: ALTERTABLE_RENAME
PREHOOK: Input: default@encrypted_table
PREHOOK: Output: default@encrypted_table
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for default.encrypted_table failed to move data due to: '/build/ql/test/data/warehouse/encrypted_table can't be moved into an encryption zone.' See hive log file for details.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for default.encrypted_table failed to move data due to: '/build/ql/test/data/warehouse/default/encrypted_table can't be moved into an encryption zone.' See hive log file for details.
PREHOOK: query: SHOW TABLES
PREHOOK: type: SHOWTABLES
PREHOOK: Input: database:default
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/explain_logical.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out
index fb91a3b..74eed93 100644
--- a/ql/src/test/results/clientpositive/explain_logical.q.out
+++ b/ql/src/test/results/clientpositive/explain_logical.q.out
@@ -263,8 +263,6 @@ LOGICAL PLAN:
src
TableScan (TS_0)
alias: src
- properties:
- insideView TRUE
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator (SEL_1)
expressions: key (type: string), value (type: string)
@@ -280,8 +278,6 @@ LOGICAL PLAN:
srcpart
TableScan (TS_0)
alias: srcpart
- properties:
- insideView TRUE
Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
Select Operator (SEL_2)
expressions: ds (type: string), key (type: string), value (type: string)
@@ -297,8 +293,6 @@ LOGICAL PLAN:
$hdt$_0:srcpart
TableScan (TS_0)
alias: srcpart
- properties:
- insideView TRUE
Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
Filter Operator (FIL_11)
predicate: key is not null (type: boolean)
@@ -334,8 +328,6 @@ $hdt$_0:srcpart
$hdt$_1:src2
TableScan (TS_3)
alias: src2
- properties:
- insideView TRUE
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Filter Operator (FIL_12)
predicate: key is not null (type: boolean)
@@ -367,8 +359,6 @@ LOGICAL PLAN:
$hdt$_0:srcpart
TableScan (TS_0)
alias: srcpart
- properties:
- insideView TRUE
Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
Filter Operator (FIL_15)
predicate: key is not null (type: boolean)
@@ -407,8 +397,6 @@ $hdt$_0:srcpart
$hdt$_1:src
TableScan (TS_3)
alias: src
- properties:
- insideView TRUE
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Filter Operator (FIL_16)
predicate: key is not null (type: boolean)
@@ -435,8 +423,6 @@ $hdt$_1:src
$hdt$_2:src3
TableScan (TS_6)
alias: src3
- properties:
- insideView TRUE
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Filter Operator (FIL_17)
predicate: key is not null (type: boolean)
@@ -484,8 +470,6 @@ LOGICAL PLAN:
srcpart
TableScan (TS_0)
alias: srcpart
- properties:
- insideView TRUE
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator (FIL_4)
predicate: (ds = '10') (type: boolean)
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby2.q.out b/ql/src/test/results/clientpositive/groupby2.q.out
index f623add..006e62f 100644
--- a/ql/src/test/results/clientpositive/groupby2.q.out
+++ b/ql/src/test/results/clientpositive/groupby2.q.out
@@ -26,23 +26,23 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string)
sort order: ++
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Group By Operator
aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0)
keys: KEY._col0 (type: string)
mode: partial1
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
table:
@@ -58,7 +58,7 @@ STAGE PLANS:
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: bigint), _col2 (type: double)
Reduce Operator Tree:
Group By Operator
@@ -66,14 +66,14 @@ STAGE PLANS:
keys: KEY._col0 (type: string)
mode: final
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join1.q.out b/ql/src/test/results/clientpositive/join1.q.out
index 5d54ee3..8843661 100644
--- a/ql/src/test/results/clientpositive/join1.q.out
+++ b/ql/src/test/results/clientpositive/join1.q.out
@@ -1,12 +1,8 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@dest_j1
-POSTHOOK: query: -- SORT_QUERY_RESULTS
-
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@dest_j1
@@ -29,34 +25,34 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src1
- Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: key is not null (type: boolean)
- Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string)
outputColumnNames: _col0
- Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
TableScan
alias: src2
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: key is not null (type: boolean)
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), value (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
Join Operator
@@ -66,14 +62,14 @@ STAGE PLANS:
0 _col0 (type: string)
1 _col0 (type: string)
outputColumnNames: _col0, _col2
- Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: UDFToInteger(_col0) (type: int), _col2 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
index 0bf9963..602068e 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
@@ -171,54 +171,6 @@ POSTHOOK: query: show partitions list_bucketing_mul_col
POSTHOOK: type: SHOWPARTITIONS
POSTHOOK: Input: default@list_bucketing_mul_col
ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-# col_name data_type comment
-
-col1 string
-col2 string
-col3 string
-col4 string
-col5 string
-
-# Partition Information
-# col_name data_type comment
-
-ds string
-hr string
-
-# Detailed Partition Information
-Partition Value: [2008-04-08, 11]
-Database: default
-Table: list_bucketing_mul_col
-#### A masked pattern was here ####
-Partition Parameters:
- COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
- numFiles 4
- numRows 500
- rawDataSize 6312
- totalSize 7094
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-Compressed: No
-Num Buckets: -1
-Bucket Columns: []
-Sort Columns: []
-Stored As SubDirectories: Yes
-Skewed Columns: [col2, col4]
-Skewed Values: [[466, val_466], [287, val_287], [82, val_82]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=466/col4=val_466, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=82/col4=val_82, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=287/col4=val_287}
-Storage Desc Params:
- serialization.format 1
PREHOOK: query: explain extended
select * from list_bucketing_mul_col
where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
http://git-wip-us.apache.org/repos/asf/hive/blob/62a3778e/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
index defdcbc..01daa63 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
@@ -171,54 +171,6 @@ POSTHOOK: query: show partitions list_bucketing_mul_col
POSTHOOK: type: SHOWPARTITIONS
POSTHOOK: Input: default@list_bucketing_mul_col
ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-# col_name data_type comment
-
-col1 string
-col2 string
-col3 string
-col4 string
-col5 string
-
-# Partition Information
-# col_name data_type comment
-
-ds string
-hr string
-
-# Detailed Partition Information
-Partition Value: [2008-04-08, 2013-01-23+18:00:99]
-Database: default
-Table: list_bucketing_mul_col
-#### A masked pattern was here ####
-Partition Parameters:
- COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
- numFiles 4
- numRows 500
- rawDataSize 6312
- totalSize 7094
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-Compressed: No
-Num Buckets: -1
-Bucket Columns: []
-Sort Columns: []
-Stored As SubDirectories: Yes
-Skewed Columns: [col2, col4]
-Skewed Values: [[466, val_466], [287, val_287], [82, val_82]]
-#### A masked pattern was here ####
-Skewed Value to Truncated Path: {[466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=466/col4=val_466, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=82/col4=val_82, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=287/col4=val_287}
-Storage Desc Params:
- serialization.format 1
PREHOOK: query: explain extended
select * from list_bucketing_mul_col
where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"