You are viewing a plain-text version of this content; the canonical link was dropped during plain-text conversion and is available in the HTML version of this archived message.
Posted to commits@hive.apache.org by sz...@apache.org on 2015/01/22 06:05:10 UTC
svn commit: r1653769 [9/14] - in /hive/branches/spark: ./
beeline/src/java/org/apache/hive/beeline/
cli/src/java/org/apache/hadoop/hive/cli/
common/src/java/org/apache/hadoop/hive/common/
common/src/java/org/apache/hadoop/hive/conf/ data/scripts/ dev-s...
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java Thu Jan 22 05:05:05 2015
@@ -20,15 +20,17 @@ package org.apache.hadoop.hive.ql.proces
import java.sql.SQLException;
-import junit.framework.Assert;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestCommandProcessorFactory {
+ private final String[] testOnlyCommands = new String[]{"crypto"};
+
private HiveConf conf;
@Before
@@ -38,27 +40,39 @@ public class TestCommandProcessorFactory
@Test
public void testInvalidCommands() throws Exception {
- Assert.assertNull("Null should have returned null", CommandProcessorFactory.getForHiveCommand(null, conf));
- Assert.assertNull("Blank should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{" "}, conf));
- Assert.assertNull("set role should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{"set role"}, conf));
- Assert.assertNull("SQL should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{"SELECT * FROM TABLE"}, conf));
+ Assert.assertNull("Null should have returned null",
+ CommandProcessorFactory.getForHiveCommand(null, conf));
+ Assert.assertNull("Blank should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{" "}, conf));
+ Assert.assertNull("Set role should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{"set role"}, conf));
+ Assert.assertNull("SQL should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{"SELECT * FROM TABLE"}, conf));
+ Assert.assertNull("Test only command should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{"CRYPTO --listZones"}, conf));
}
+
@Test
public void testAvailableCommands() throws Exception {
+ enableTestOnlyCmd(conf);
SessionState.start(conf);
+
for (HiveCommand command : HiveCommand.values()) {
String cmd = command.name();
- Assert.assertNotNull("Cmd " + cmd + " not return null", CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf));
- }
- for (HiveCommand command : HiveCommand.values()) {
- String cmd = command.name().toLowerCase();
- Assert.assertNotNull("Cmd " + cmd + " not return null", CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf));
+ String cmdInLowerCase = cmd.toLowerCase();
+ Assert.assertNotNull("Cmd " + cmd + " not return null",
+ CommandProcessorFactory
+ .getForHiveCommandInternal(new String[]{cmd}, conf, command.isOnlyForTesting()));
+ Assert.assertNotNull("Cmd " + cmd + " not return null",
+ CommandProcessorFactory.getForHiveCommandInternal(
+ new String[]{cmdInLowerCase}, conf, command.isOnlyForTesting()));
}
conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), "");
for (HiveCommand command : HiveCommand.values()) {
String cmd = command.name();
try {
- CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf);
+ CommandProcessorFactory
+ .getForHiveCommandInternal(new String[]{cmd}, conf, command.isOnlyForTesting());
Assert.fail("Expected SQLException for " + cmd + " as available commands is empty");
} catch (SQLException e) {
Assert.assertEquals("Insufficient privileges to execute " + cmd, e.getMessage());
@@ -66,4 +80,13 @@ public class TestCommandProcessorFactory
}
}
}
+
+ private void enableTestOnlyCmd(HiveConf conf){
+ StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST));
+ for(String c : testOnlyCommands){
+ securityCMDs.append(",");
+ securityCMDs.append(c);
+ }
+ conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString());
+ }
}
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/auto_join21.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/auto_join21.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/auto_join21.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/auto_join21.q Thu Jan 22 05:05:05 2015
@@ -1,4 +1,7 @@
set hive.auto.convert.join = true;
+
+-- SORT_QUERY_RESULTS
+
explain
SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/auto_join23.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/auto_join23.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/auto_join23.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/auto_join23.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,7 @@
set hive.auto.convert.join = true;
+-- SORT_QUERY_RESULTS
+
explain
SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/constprog_partitioner.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/constprog_partitioner.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/constprog_partitioner.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/constprog_partitioner.q Thu Jan 22 05:05:05 2015
@@ -9,3 +9,16 @@ SELECT src1.key, src1.key + 1, src2.valu
SELECT src1.key, src1.key + 1, src2.value
FROM src src1 join src src2 ON src1.key = src2.key AND src1.key = 100;
+
+EXPLAIN
+SELECT l_partkey, l_suppkey
+FROM lineitem li
+WHERE li.l_linenumber = 1 AND
+ li.l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipmode = 'AIR' AND l_linenumber = li.l_linenumber)
+;
+
+SELECT l_partkey, l_suppkey
+FROM lineitem li
+WHERE li.l_linenumber = 1 AND
+ li.l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipmode = 'AIR' AND l_linenumber = li.l_linenumber)
+;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/groupby2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/groupby2.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/groupby2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/groupby2.q Thu Jan 22 05:05:05 2015
@@ -11,4 +11,6 @@ INSERT OVERWRITE TABLE dest_g2 SELECT su
FROM src
INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
+-- SORT_QUERY_RESULTS
+
SELECT dest_g2.* FROM dest_g2;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/groupby_multi_single_reducer2.q Thu Jan 22 05:05:05 2015
@@ -3,6 +3,8 @@ set hive.multigroupby.singlereducer=true
CREATE TABLE dest_g2(key STRING, c1 INT) STORED AS TEXTFILE;
CREATE TABLE dest_g3(key STRING, c1 INT, c2 INT) STORED AS TEXTFILE;
+-- SORT_QUERY_RESULTS
+
EXPLAIN
FROM src
INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1)
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/groupby_ppr.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/groupby_ppr.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/groupby_ppr.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/groupby_ppr.q Thu Jan 22 05:05:05 2015
@@ -1,6 +1,8 @@
set hive.map.aggr=false;
set hive.groupby.skewindata=false;
+-- SORT_QUERY_RESULTS
+
CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE;
EXPLAIN EXTENDED
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/input14.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/input14.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/input14.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/input14.q Thu Jan 22 05:05:05 2015
@@ -17,4 +17,6 @@ FROM (
) tmap
INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
+-- SORT_QUERY_RESULTS
+
SELECT dest1.* FROM dest1;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/input17.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/input17.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/input17.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/input17.q Thu Jan 22 05:05:05 2015
@@ -17,4 +17,6 @@ FROM (
) tmap
INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
+-- SORT_QUERY_RESULTS
+
SELECT dest1.* FROM dest1;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/input18.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/input18.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/input18.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/input18.q Thu Jan 22 05:05:05 2015
@@ -17,4 +17,6 @@ FROM (
) tmap
INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100;
+-- SORT_QUERY_RESULTS
+
SELECT dest1.* FROM dest1;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/input_part2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/input_part2.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/input_part2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/input_part2.q Thu Jan 22 05:05:05 2015
@@ -1,6 +1,8 @@
CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
+-- SORT_QUERY_RESULTS
+
EXPLAIN EXTENDED
FROM srcpart
INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join0.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join0.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join0.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join0.q Thu Jan 22 05:05:05 2015
@@ -1,4 +1,5 @@
-- JAVA_VERSION_SPECIFIC_OUTPUT
+-- SORT_QUERY_RESULTS
EXPLAIN
SELECT src1.key as k1, src1.value as v1,
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join15.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join15.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join15.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join15.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
EXPLAIN
SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key) SORT BY src1.key, src1.value, src2.key, src2.value;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join18.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join18.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join18.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join18.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
EXPLAIN
SELECT a.key, a.value, b.key, b.value
FROM
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join20.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join20.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join20.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join20.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
EXPLAIN
SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20)
SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join21.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join21.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join21.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join21.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
EXPLAIN
SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join23.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join23.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join23.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join23.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
EXPLAIN
SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join6.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join6.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join6.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join6.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,7 @@
CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE;
+-- SORT_QUERY_RESULTS
+
EXPLAIN
FROM (
FROM
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join7.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join7.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join7.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join7.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,7 @@
CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT, c6 STRING) STORED AS TEXTFILE;
+-- SORT_QUERY_RESULTS
+
EXPLAIN
FROM (
FROM
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join_array.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join_array.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join_array.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join_array.q Thu Jan 22 05:05:05 2015
@@ -4,6 +4,8 @@ create table tinyB(a bigint, bList array
load data local inpath '../../data/files/tiny_a.txt' into table tinyA;
load data local inpath '../../data/files/tiny_b.txt' into table tinyB;
+-- SORT_QUERY_RESULTS
+
select * from tinyA;
select * from tinyB;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,7 @@
set hive.limit.query.max.table.partition=1;
+-- SORT_QUERY_RESULTS
+
explain select ds from srcpart where hr=11 and ds='2008-04-08';
select ds from srcpart where hr=11 and ds='2008-04-08';
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_decimal.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_decimal.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_decimal.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_decimal.q Thu Jan 22 05:05:05 2015
@@ -29,11 +29,6 @@ explain
select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
set hive.mapjoin.optimized.hashtable=false;
-set hive.mapjoin.optimized.keys=false;
-
-select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
-
-set hive.mapjoin.optimized.keys=true;
select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_filter_on_outerjoin.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_filter_on_outerjoin.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_filter_on_outerjoin.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_filter_on_outerjoin.q Thu Jan 22 05:05:05 2015
@@ -1,4 +1,7 @@
set hive.auto.convert.join = false;
+
+-- SORT_QUERY_RESULTS
+
--HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition
SELECT * FROM src1
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_test_outer.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_test_outer.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_test_outer.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/mapjoin_test_outer.q Thu Jan 22 05:05:05 2015
@@ -1,4 +1,7 @@
set hive.auto.convert.join = false;
+
+-- SORT_QUERY_RESULTS
+
--HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition
create table dest_1 (key STRING, value STRING) stored as textfile;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q Thu Jan 22 05:05:05 2015
@@ -7,9 +7,13 @@ set hive.fetch.task.conversion.threshold
explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
explain select cast(key as int) * 10, upper(value) from src limit 10;
+-- Scans without limit (should be Fetch task now)
+explain select concat(key, value) from src;
set hive.fetch.task.conversion.threshold=100;
-- from HIVE-7397, limit + partition pruning filter
explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
explain select cast(key as int) * 10, upper(value) from src limit 10;
+-- Scans without limit (should not be Fetch task now)
+explain select concat(key, value) from src;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ppd_transform.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ppd_transform.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ppd_transform.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ppd_transform.q Thu Jan 22 05:05:05 2015
@@ -2,6 +2,8 @@ set hive.optimize.ppd=true;
set hive.ppd.remove.duplicatefilters=false;
set hive.entity.capture.transform=true;
+-- SORT_QUERY_RESULTS
+
EXPLAIN
FROM (
FROM src
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_matchpath.q Thu Jan 22 05:05:05 2015
@@ -12,6 +12,8 @@ FL_NUM string
LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny;
+-- SORT_QUERY_RESULTS
+
-- 1. basic Matchpath test
select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
from matchpath(on
@@ -43,4 +45,4 @@ from matchpath(on
arg2('LATE'), arg3(arr_delay > 15),
arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
);
-
\ No newline at end of file
+
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_rcfile.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_rcfile.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_rcfile.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_rcfile.q Thu Jan 22 05:05:05 2015
@@ -14,6 +14,8 @@ CREATE TABLE part_rc(
LOAD DATA LOCAL INPATH '../../data/files/part.rc' overwrite into table part_rc;
+-- SORT_QUERY_RESULTS
+
-- testWindowingPTFWithPartRC
select p_mfgr, p_name, p_size,
rank() over (partition by p_mfgr order by p_name) as r,
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_register_tblfn.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_register_tblfn.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_register_tblfn.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_register_tblfn.q Thu Jan 22 05:05:05 2015
@@ -14,6 +14,7 @@ LOAD DATA LOCAL INPATH '../../data/files
create temporary function matchpathtest as 'org.apache.hadoop.hive.ql.udf.ptf.MatchPath$MatchPathResolver';
+-- SORT_QUERY_RESULTS
-- 1. basic Matchpath test
select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_seqfile.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_seqfile.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_seqfile.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_seqfile.q Thu Jan 22 05:05:05 2015
@@ -14,6 +14,8 @@ CREATE TABLE part_seq(
LOAD DATA LOCAL INPATH '../../data/files/part.seq' overwrite into table part_seq;
+-- SORT_QUERY_RESULTS
+
-- testWindowingPTFWithPartSeqFile
select p_mfgr, p_name, p_size,
rank() over (partition by p_mfgr order by p_name) as r,
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/sample3.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/sample3.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/sample3.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/sample3.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
-- no input pruning, sample filter
EXPLAIN
SELECT s.key
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/sample5.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/sample5.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/sample5.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/sample5.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,7 @@
CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+-- SORT_QUERY_RESULTS
+
-- no input pruning, sample filter
EXPLAIN EXTENDED
INSERT OVERWRITE TABLE dest1 SELECT s.* -- here's another test
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/scriptfile1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/scriptfile1.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/scriptfile1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/scriptfile1.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,7 @@
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+-- SORT_QUERY_RESULTS
+
-- EXCLUDE_OS_WINDOWS
CREATE TABLE dest1(key INT, value STRING);
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/semijoin.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/semijoin.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/semijoin.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/semijoin.q Thu Jan 22 05:05:05 2015
@@ -1,7 +1,4 @@
-
-
-
-
+-- SORT_QUERY_RESULTS
create table t1 as select cast(key as int) key, value from src where key <= 10;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/smb_mapjoin_11.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/smb_mapjoin_11.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/smb_mapjoin_11.q Thu Jan 22 05:05:05 2015
@@ -29,6 +29,13 @@ INSERT OVERWRITE TABLE test_table3 PARTI
INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1';
+SELECT * FROM test_table1 ORDER BY key;
+SELECT * FROM test_table3 ORDER BY key;
+EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+
-- Join data from a sampled bucket to verify the data is bucketed
SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1';
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/sort.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/sort.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/sort.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/sort.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
EXPLAIN
SELECT x.* FROM SRC x SORT BY key;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/stats1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/stats1.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/stats1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/stats1.q Thu Jan 22 05:05:05 2015
@@ -4,6 +4,8 @@ set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
set hive.map.aggr=true;
+-- SORT_QUERY_RESULTS
+
create table tmptable(key string, value string);
EXPLAIN
@@ -27,4 +29,4 @@ DESCRIBE FORMATTED tmptable;
-- Some stats (numFiles, totalSize) should be updated correctly
-- Some other stats (numRows, rawDataSize) should be cleared
load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable;
-DESCRIBE FORMATTED tmptable;
\ No newline at end of file
+DESCRIBE FORMATTED tmptable;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr1.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr1.q Thu Jan 22 05:05:05 2015
@@ -1,6 +1,8 @@
set hive.optimize.ppd=true;
set hive.entity.capture.transform=true;
+-- SORT_QUERY_RESULTS
+
EXPLAIN EXTENDED
FROM (
FROM srcpart src
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr2.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/transform_ppr2.q Thu Jan 22 05:05:05 2015
@@ -1,6 +1,8 @@
set hive.optimize.ppd=true;
set hive.entity.capture.transform=true;
+-- SORT_QUERY_RESULTS
+
EXPLAIN EXTENDED
FROM (
FROM srcpart src
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union10.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union10.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union10.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union10.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,7 @@
set hive.map.aggr = true;
+-- SORT_QUERY_RESULTS
+
-- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by filesink
create table tmptable(key string, value int);
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union18.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union18.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union18.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union18.q Thu Jan 22 05:05:05 2015
@@ -1,6 +1,8 @@
CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+-- SORT_QUERY_RESULTS
+
-- union case:map-reduce sub-queries followed by multi-table insert
explain
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union19.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union19.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union19.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union19.q Thu Jan 22 05:05:05 2015
@@ -1,5 +1,4 @@
-
-
+-- SORT_QUERY_RESULTS
CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union6.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union6.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union6.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union6.q Thu Jan 22 05:05:05 2015
@@ -1,7 +1,8 @@
set hive.map.aggr = true;
--- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink
+-- SORT_QUERY_RESULTS
+-- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink
create table tmptable(key string, value string);
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/union_ppr.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/union_ppr.q?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/union_ppr.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/union_ppr.q Thu Jan 22 05:05:05 2015
@@ -1,3 +1,5 @@
+-- SORT_QUERY_RESULTS
+
EXPLAIN EXTENDED
SELECT * FROM (
SELECT X.* FROM SRCPART X WHERE X.key < 100
Modified: hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_add.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_add.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_add.q.out (original)
+++ hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_add.q.out Thu Jan 22 05:05:05 2015
@@ -9,7 +9,7 @@ Saving all output to "!!{outputDirectory
'date_add(start_date, num_days) - Returns the date that is num_days after start_date.'
'start_date is a string in the format 'yyyy-MM-dd HH:mm:ss' or 'yyyy-MM-dd'. num_days is a number. The time part of start_date is ignored.'
'Example:'
-' > SELECT date_add('2009-30-07', 1) FROM src LIMIT 1;'
-' '2009-31-07''
+' > SELECT date_add('2009-07-30', 1) FROM src LIMIT 1;'
+' '2009-07-31''
5 rows selected
>>> !record
Modified: hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_sub.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_sub.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_sub.q.out (original)
+++ hive/branches/spark/ql/src/test/results/beelinepositive/udf_date_sub.q.out Thu Jan 22 05:05:05 2015
@@ -9,7 +9,7 @@ Saving all output to "!!{outputDirectory
'date_sub(start_date, num_days) - Returns the date that is num_days before start_date.'
'start_date is a string in the format 'yyyy-MM-dd HH:mm:ss' or 'yyyy-MM-dd'. num_days is a number. The time part of start_date is ignored.'
'Example:'
-' > SELECT date_sub('2009-30-07', 1) FROM src LIMIT 1;'
-' '2009-29-07''
+' > SELECT date_sub('2009-07-30', 1) FROM src LIMIT 1;'
+' '2009-07-29''
5 rows selected
>>> !record
Modified: hive/branches/spark/ql/src/test/results/beelinepositive/udf_datediff.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/beelinepositive/udf_datediff.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/beelinepositive/udf_datediff.q.out (original)
+++ hive/branches/spark/ql/src/test/results/beelinepositive/udf_datediff.q.out Thu Jan 22 05:05:05 2015
@@ -9,7 +9,7 @@ Saving all output to "!!{outputDirectory
'datediff(date1, date2) - Returns the number of days between date1 and date2'
'date1 and date2 are strings in the format 'yyyy-MM-dd HH:mm:ss' or 'yyyy-MM-dd'. The time parts are ignored.If date1 is earlier than date2, the result is negative.'
'Example:'
-' > SELECT datediff('2009-30-07', '2009-31-07') FROM src LIMIT 1;'
+' > SELECT datediff('2009-07-30', '2009-07-31') FROM src LIMIT 1;'
' 1'
5 rows selected
>>> !record
Modified: hive/branches/spark/ql/src/test/results/beelinepositive/udf_day.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/beelinepositive/udf_day.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/beelinepositive/udf_day.q.out (original)
+++ hive/branches/spark/ql/src/test/results/beelinepositive/udf_day.q.out Thu Jan 22 05:05:05 2015
@@ -10,7 +10,7 @@ Saving all output to "!!{outputDirectory
'Synonyms: dayofmonth'
'date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or 'yyyy-MM-dd'.'
'Example:'
-' > SELECT day('2009-30-07', 1) FROM src LIMIT 1;'
+' > SELECT day('2009-07-30') FROM src LIMIT 1;'
' 30'
6 rows selected
>>> !record
Modified: hive/branches/spark/ql/src/test/results/beelinepositive/udf_dayofmonth.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/beelinepositive/udf_dayofmonth.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/beelinepositive/udf_dayofmonth.q.out (original)
+++ hive/branches/spark/ql/src/test/results/beelinepositive/udf_dayofmonth.q.out Thu Jan 22 05:05:05 2015
@@ -10,7 +10,7 @@ Saving all output to "!!{outputDirectory
'Synonyms: day'
'date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or 'yyyy-MM-dd'.'
'Example:'
-' > SELECT dayofmonth('2009-30-07', 1) FROM src LIMIT 1;'
+' > SELECT dayofmonth('2009-07-30') FROM src LIMIT 1;'
' 30'
6 rows selected
>>> !record
Modified: hive/branches/spark/ql/src/test/results/beelinepositive/udf_to_date.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/beelinepositive/udf_to_date.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/beelinepositive/udf_to_date.q.out (original)
+++ hive/branches/spark/ql/src/test/results/beelinepositive/udf_to_date.q.out Thu Jan 22 05:05:05 2015
@@ -8,7 +8,7 @@ Saving all output to "!!{outputDirectory
'tab_name'
'to_date(expr) - Extracts the date part of the date or datetime expression expr'
'Example:'
-' > SELECT to_date('2009-30-07 04:17:52') FROM src LIMIT 1;'
-' '2009-30-07''
+' > SELECT to_date('2009-07-30 04:17:52') FROM src LIMIT 1;'
+' '2009-07-30''
4 rows selected
>>> !record
Modified: hive/branches/spark/ql/src/test/results/clientnegative/fs_default_name2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/fs_default_name2.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/fs_default_name2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/fs_default_name2.q.out Thu Jan 22 05:05:05 2015
@@ -1 +1 @@
-FAILED: IllegalArgumentException Illegal character in scheme name at index 0: 'http://www.example.com
+FAILED: SemanticException java.lang.IllegalArgumentException: Illegal character in scheme name at index 0: 'http://www.example.com
Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out Thu Jan 22 05:05:05 2015
@@ -389,8 +389,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -446,8 +447,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -503,8 +505,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -560,8 +563,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -617,8 +621,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -674,8 +679,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -792,8 +798,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -908,8 +915,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -965,8 +973,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -1022,8 +1031,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -1079,8 +1089,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -1136,8 +1147,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -1193,8 +1205,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
@@ -1307,8 +1320,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: int)
outputColumnNames: _col0, _col1
Modified: hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out Thu Jan 22 05:05:05 2015
@@ -164,8 +164,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 40 Data size: 400 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col0, _col1
@@ -341,8 +342,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 8 Data size: 2064 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col0, _col1
@@ -455,8 +457,9 @@ STAGE PLANS:
Group By Operator
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 8 Data size: 2064 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: _col0, _col1
Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join21.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join21.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join21.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join21.q.out Thu Jan 22 05:05:05 2015
@@ -1,7 +1,11 @@
-PREHOOK: query: explain
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Modified: hive/branches/spark/ql/src/test/results/clientpositive/auto_join23.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/auto_join23.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/auto_join23.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/auto_join23.q.out Thu Jan 22 05:05:05 2015
@@ -1,8 +1,12 @@
Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value
PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Modified: hive/branches/spark/ql/src/test/results/clientpositive/cluster.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/cluster.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/cluster.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/cluster.q.out Thu Jan 22 05:05:05 2015
@@ -25,7 +25,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '10' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '10' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
@@ -83,7 +83,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
@@ -141,7 +141,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
@@ -199,7 +199,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
@@ -257,7 +257,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
@@ -315,7 +315,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
@@ -430,7 +430,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
Reduce Operator Tree:
@@ -485,7 +485,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: value (type: string)
TableScan
@@ -497,7 +497,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Join Operator
@@ -576,7 +576,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: value (type: string)
TableScan
@@ -588,7 +588,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: value (type: string)
Reduce Operator Tree:
@@ -673,7 +673,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: value (type: string)
TableScan
@@ -685,7 +685,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: value (type: string)
Reduce Operator Tree:
@@ -715,7 +715,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string), _col3 (type: string)
Reduce Operator Tree:
@@ -770,7 +770,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: value (type: string)
TableScan
@@ -782,7 +782,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Join Operator
@@ -811,7 +811,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '20' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '20' (type: string)
Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string), _col2 (type: string)
Reduce Operator Tree:
Modified: hive/branches/spark/ql/src/test/results/clientpositive/constprog2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/constprog2.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/constprog2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/constprog2.q.out Thu Jan 22 05:05:05 2015
@@ -27,7 +27,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '86' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '86' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
TableScan
@@ -41,7 +41,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '86' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '86' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Join Operator
@@ -110,7 +110,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '86' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '86' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
TableScan
@@ -124,7 +124,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '86' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '86' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Join Operator
Modified: hive/branches/spark/ql/src/test/results/clientpositive/constprog_partitioner.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/constprog_partitioner.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/constprog_partitioner.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/constprog_partitioner.q.out Thu Jan 22 05:05:05 2015
@@ -27,7 +27,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '100' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '100' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
value expressions: _col1 (type: string)
TableScan
@@ -41,7 +41,7 @@ STAGE PLANS:
Reduce Output Operator
key expressions: '100' (type: string)
sort order: +
- Map-reduce partition columns: '' (type: string)
+ Map-reduce partition columns: '100' (type: string)
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Join Operator
@@ -84,3 +84,102 @@ POSTHOOK: Input: default@src
100 101.0 val_100
100 101.0 val_100
100 101.0 val_100
+PREHOOK: query: EXPLAIN
+SELECT l_partkey, l_suppkey
+FROM lineitem li
+WHERE li.l_linenumber = 1 AND
+ li.l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipmode = 'AIR' AND l_linenumber = li.l_linenumber)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT l_partkey, l_suppkey
+FROM lineitem li
+WHERE li.l_linenumber = 1 AND
+ li.l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipmode = 'AIR' AND l_linenumber = li.l_linenumber)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: li
+ Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((l_linenumber = 1) and l_orderkey is not null) (type: boolean)
+ Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int), 1 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col3 (type: int)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: int), _col3 (type: int)
+ Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: int), _col2 (type: int)
+ TableScan
+ alias: li
+ Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((l_shipmode = 'AIR') and l_orderkey is not null) and l_linenumber is not null) (type: boolean)
+ Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: l_orderkey (type: int), l_linenumber (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: int), _col1 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: int)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+ Statistics: Num rows: 13 Data size: 1559 Basic stats: COMPLETE Column stats: NONE
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Left Semi Join 0 to 1
+ keys:
+ 0 _col0 (type: int), _col3 (type: int)
+ 1 _col0 (type: int), _col1 (type: int)
+ outputColumnNames: _col1, _col2
+ Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: int), _col2 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 27 Data size: 3298 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT l_partkey, l_suppkey
+FROM lineitem li
+WHERE li.l_linenumber = 1 AND
+ li.l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipmode = 'AIR' AND l_linenumber = li.l_linenumber)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT l_partkey, l_suppkey
+FROM lineitem li
+WHERE li.l_linenumber = 1 AND
+ li.l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipmode = 'AIR' AND l_linenumber = li.l_linenumber)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+108570 8571
+4297 1798
Modified: hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer12.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer12.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer12.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/correlationoptimizer12.q.out Thu Jan 22 05:05:05 2015
@@ -36,9 +36,11 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
+ value expressions: _col1 (type: string)
Reduce Operator Tree:
- Extract
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
PTF Operator
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -109,9 +111,11 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
+ value expressions: _col1 (type: string)
Reduce Operator Tree:
- Extract
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
PTF Operator
Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
Modified: hive/branches/spark/ql/src/test/results/clientpositive/ctas_colname.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/ctas_colname.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/ctas_colname.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/ctas_colname.q.out Thu Jan 22 05:05:05 2015
@@ -183,9 +183,10 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
Reduce Operator Tree:
- Extract
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
PTF Operator
Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -329,9 +330,10 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
Reduce Operator Tree:
- Extract
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
PTF Operator
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby2.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby2.q.out Thu Jan 22 05:05:05 2015
@@ -81,11 +81,15 @@ POSTHOOK: Output: default@dest_g2
POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT dest_g2.* FROM dest_g2
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT dest_g2.* FROM dest_g2
PREHOOK: type: QUERY
PREHOOK: Input: default@dest_g2
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dest_g2.* FROM dest_g2
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT dest_g2.* FROM dest_g2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@dest_g2
#### A masked pattern was here ####
Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_cube1.q.out Thu Jan 22 05:05:05 2015
@@ -56,8 +56,9 @@ STAGE PLANS:
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2, _col3
+ outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
@@ -223,8 +224,9 @@ STAGE PLANS:
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col2
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
@@ -320,8 +322,9 @@ STAGE PLANS:
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: final
- outputColumnNames: _col0, _col1, _col2, _col3
+ outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
@@ -403,8 +406,9 @@ STAGE PLANS:
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string)
mode: complete
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col2
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
@@ -541,8 +545,9 @@ STAGE PLANS:
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: final
- outputColumnNames: _col0, _col1, _col2, _col3
+ outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
@@ -608,8 +613,9 @@ STAGE PLANS:
aggregations: sum(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: final
- outputColumnNames: _col0, _col1, _col2, _col3
+ outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out Thu Jan 22 05:05:05 2015
@@ -79,8 +79,9 @@ STAGE PLANS:
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: final
- outputColumnNames: _col0, _col1, _col2, _col3
+ outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
@@ -185,8 +186,9 @@ STAGE PLANS:
aggregations: sum(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: final
- outputColumnNames: _col0, _col1, _col2, _col3
+ outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: double)
outputColumnNames: _col0, _col1, _col2
@@ -313,8 +315,9 @@ STAGE PLANS:
aggregations: sum(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: final
- outputColumnNames: _col0, _col1, _col2, _col3
+ outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 12 Data size: 84 Basic stats: COMPLETE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Modified: hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out?rev=1653769&r1=1653768&r2=1653769&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out Thu Jan 22 05:05:05 2015
@@ -74,8 +74,9 @@ STAGE PLANS:
aggregations: avg(VALUE._col0), count(VALUE._col1)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: mergepartial
- outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ outputColumnNames: _col0, _col1, _col3, _col4
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: double), _col4 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
@@ -185,8 +186,9 @@ STAGE PLANS:
aggregations: avg(VALUE._col0), count(VALUE._col1)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
mode: final
- outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ outputColumnNames: _col0, _col1, _col3, _col4
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+ pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: double), _col4 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3