Posted to commits@hive.apache.org by br...@apache.org on 2014/11/07 21:41:45 UTC

svn commit: r1637444 [9/20] - in /hive/branches/spark: ./ cli/src/test/org/apache/hadoop/hive/cli/ common/ common/src/java/org/apache/hadoop/hive/common/type/ common/src/java/org/apache/hadoop/hive/conf/ common/src/test/org/apache/hadoop/hive/conf/ com...

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 create table part2( 
     p2_partkey INT,
     p2_name STRING,

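The deletion above repeats across most of the .q files in this commit: each test used to drop, create, and load its own copy of the TPC-H part table before querying it. After this revision the tests simply assume part already exists, presumably provisioned once by the shared q-file test setup rather than once per test. For reference, this is the boilerplate being stripped from every affected file (a sketch of the removed setup, including the DROP guard some files carried):

-- Per-test setup removed by this commit; the table is assumed to be
-- created up front by the test harness instead.
DROP TABLE IF EXISTS part;
CREATE TABLE part(
    p_partkey INT,
    p_name STRING,
    p_mfgr STRING,
    p_brand STRING,
    p_type STRING,
    p_size INT,
    p_container STRING,
    p_retailprice DOUBLE,
    p_comment STRING
);
LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
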
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 create table part2( 
     p2_partkey INT,
     p2_name STRING,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 create table part2( 
     p2_partkey INT,
     p2_name STRING,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/join_merging.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/join_merging.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/join_merging.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/join_merging.q Fri Nov  7 20:41:34 2014
@@ -1,17 +1,4 @@
 
-
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
 explain select p1.p_size, p2.p_size 
 from part p1 left outer join part p2 on p1.p_partkey = p2.p_partkey 
   right outer join part p3 on p2.p_partkey = p3.p_partkey and 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/leadlag.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/leadlag.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/leadlag.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/leadlag.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 --1. testLagWithPTFWindowing
 select p_mfgr, p_name,
 rank() over (partition by p_mfgr order by p_name) as r,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/leadlag_queries.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/leadlag_queries.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/leadlag_queries.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/leadlag_queries.q Fri Nov  7 20:41:34 2014
@@ -1,18 +1,3 @@
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 -- 1. testLeadUDAF
 select p_mfgr, p_retailprice,
 lead(p_retailprice) over (partition by p_mfgr order by p_name) as l1,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge1.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge1.q Fri Nov  7 20:41:34 2014
@@ -31,7 +31,7 @@ INSERT OVERWRITE TABLE orcfile_merge1 PA
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-DESC FORMATTED orcfile_merge1 partition (ds='1', part='0');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
 
 set hive.merge.tezfiles=true;
 set hive.merge.mapfiles=true;
@@ -46,7 +46,7 @@ INSERT OVERWRITE TABLE orcfile_merge1b P
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-DESC FORMATTED orcfile_merge1b partition (ds='1', part='0');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b/ds=1/part=0/;
 
 set hive.merge.orcfile.stripe.level=true;
 -- auto-merge fast way
@@ -59,7 +59,7 @@ INSERT OVERWRITE TABLE orcfile_merge1c P
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-DESC FORMATTED orcfile_merge1c partition (ds='1', part='0');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c/ds=1/part=0/;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- Verify

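Throughout the orc_merge tests that follow, DESC FORMATTED calls are replaced with dfs -ls listings of the table or partition directory. These tests exercise the hive.merge.* settings and assert how many ORC files remain after each insert or concatenate; listing the directory verifies the file count directly on disk, where DESC FORMATTED only surfaced it indirectly through table statistics. A minimal sketch of the new verification pattern, using the table from the hunk above:

-- (merge settings and dynamic-partition setup elided; see the full test)
INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
    SELECT key, value, PMOD(HASH(key), 2) as part
    FROM src;
-- One output line per data file, making the merged-vs-unmerged count visible:
dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
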
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge2.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge2.q Fri Nov  7 20:41:34 2014
@@ -18,7 +18,7 @@ INSERT OVERWRITE TABLE orcfile_merge2a P
         PMOD(HASH(value), 10) as three
     FROM src;
 
-DESC FORMATTED orcfile_merge2a partition (one='1', two='0', three='2');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a/one=1/two=0/three=2/;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge3.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge3.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge3.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge3.q Fri Nov  7 20:41:34 2014
@@ -19,7 +19,7 @@ EXPLAIN INSERT OVERWRITE TABLE orcfile_m
 INSERT OVERWRITE TABLE orcfile_merge3b
     SELECT key, value FROM orcfile_merge3a;
 
-DESC FORMATTED orcfile_merge3b;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3b/;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge4.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge4.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge4.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge4.q Fri Nov  7 20:41:34 2014
@@ -11,7 +11,7 @@ set hive.merge.mapfiles=false;
 INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
     SELECT * FROM src;
 
-DESC FORMATTED orcfile_merge3a PARTITION (ds='1');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3a/ds=1/;
 
 set hive.merge.mapfiles=true;
 INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
@@ -20,8 +20,8 @@ INSERT OVERWRITE TABLE orcfile_merge3a P
 INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
     SELECT * FROM src;
 
-DESC FORMATTED orcfile_merge3a PARTITION (ds='1');
-DESC FORMATTED orcfile_merge3a PARTITION (ds='2');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3a/ds=1/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3a/ds=2/;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge5.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge5.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge5.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge5.q Fri Nov  7 20:41:34 2014
@@ -23,7 +23,7 @@ insert overwrite table orc_merge5b selec
 
 -- 3 files total
 analyze table orc_merge5b compute statistics noscan;
-desc formatted orc_merge5b;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
 select * from orc_merge5b;
 
 set hive.merge.orcfile.stripe.level=true;
@@ -37,7 +37,7 @@ insert overwrite table orc_merge5b selec
 
 -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan;
-desc formatted orc_merge5b;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
 select * from orc_merge5b;
 
 set hive.merge.orcfile.stripe.level=false;
@@ -47,7 +47,7 @@ set hive.merge.mapredfiles=false;
 
 insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
 analyze table orc_merge5b compute statistics noscan;
-desc formatted orc_merge5b;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
 select * from orc_merge5b;
 
 set hive.merge.orcfile.stripe.level=true;
@@ -56,6 +56,6 @@ alter table orc_merge5b concatenate;
 
 -- 1 file after merging
 analyze table orc_merge5b compute statistics noscan;
-desc formatted orc_merge5b;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
 select * from orc_merge5b;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge6.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge6.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge6.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge6.q Fri Nov  7 20:41:34 2014
@@ -26,8 +26,8 @@ insert overwrite table orc_merge5a parti
 -- 3 files total
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
 analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-desc formatted orc_merge5a partition(year="2000",hour=24);
-desc formatted orc_merge5a partition(year="2001",hour=24);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
 show partitions orc_merge5a;
 select * from orc_merge5a;
 
@@ -44,8 +44,8 @@ insert overwrite table orc_merge5a parti
 -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
 analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-desc formatted orc_merge5a partition(year="2000",hour=24);
-desc formatted orc_merge5a partition(year="2001",hour=24);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
 show partitions orc_merge5a;
 select * from orc_merge5a;
 
@@ -58,8 +58,8 @@ insert overwrite table orc_merge5a parti
 insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
 analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-desc formatted orc_merge5a partition(year="2000",hour=24);
-desc formatted orc_merge5a partition(year="2001",hour=24);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
 show partitions orc_merge5a;
 select * from orc_merge5a;
 
@@ -71,8 +71,8 @@ alter table orc_merge5a partition(year="
 -- 1 file after merging
 analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
 analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-desc formatted orc_merge5a partition(year="2000",hour=24);
-desc formatted orc_merge5a partition(year="2001",hour=24);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
 show partitions orc_merge5a;
 select * from orc_merge5a;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge7.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge7.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge7.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge7.q Fri Nov  7 20:41:34 2014
@@ -30,8 +30,8 @@ insert overwrite table orc_merge5a parti
 -- 3 files total
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
 analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-desc formatted orc_merge5a partition(st=80.0);
-desc formatted orc_merge5a partition(st=0.8);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
 show partitions orc_merge5a;
 select * from orc_merge5a where userid<=13;
 
@@ -48,8 +48,8 @@ insert overwrite table orc_merge5a parti
 -- 1 file after merging
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
 analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-desc formatted orc_merge5a partition(st=80.0);
-desc formatted orc_merge5a partition(st=0.8);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
 show partitions orc_merge5a;
 select * from orc_merge5a where userid<=13;
 
@@ -62,8 +62,8 @@ insert overwrite table orc_merge5a parti
 insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
 analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-desc formatted orc_merge5a partition(st=80.0);
-desc formatted orc_merge5a partition(st=0.8);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
 show partitions orc_merge5a;
 select * from orc_merge5a where userid<=13;
 
@@ -75,8 +75,8 @@ alter table orc_merge5a partition(st=0.8
 -- 1 file after merging
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
 analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-desc formatted orc_merge5a partition(st=80.0);
-desc formatted orc_merge5a partition(st=0.8);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
 show partitions orc_merge5a;
 select * from orc_merge5a where userid<=13;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat1.q Fri Nov  7 20:41:34 2014
@@ -22,7 +22,7 @@ insert into table orc_merge5b select use
 
 -- 5 files total
 analyze table orc_merge5b compute statistics noscan;
-desc formatted orc_merge5b;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
 select * from orc_merge5b;
 
 set hive.merge.orcfile.stripe.level=true;
@@ -30,6 +30,6 @@ alter table orc_merge5b concatenate;
 
 -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind
 analyze table orc_merge5b compute statistics noscan;
-desc formatted orc_merge5b;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
 select * from orc_merge5b;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat2.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/orc_merge_incompat2.q Fri Nov  7 20:41:34 2014
@@ -32,8 +32,8 @@ insert into table orc_merge5a partition 
 
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
 analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-desc formatted orc_merge5a partition(st=80.0);
-desc formatted orc_merge5a partition(st=0.8);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
 show partitions orc_merge5a;
 select * from orc_merge5a where userid<=13;
 
@@ -44,8 +44,8 @@ alter table orc_merge5a partition(st=0.8
 
 analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
 analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-desc formatted orc_merge5a partition(st=80.0);
-desc formatted orc_merge5a partition(st=0.8);
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
 show partitions orc_merge5a;
 select * from orc_merge5a where userid<=13;
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/order_within_subquery.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/order_within_subquery.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/order_within_subquery.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/order_within_subquery.q Fri Nov  7 20:41:34 2014
@@ -1,18 +1,3 @@
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
-
 select t1.p_name, t2.p_name 
 from (select * from part order by p_size limit 10) t1 join part t2 on t1.p_partkey = t2.p_partkey and t1.p_size = t2.p_size 
 where t1.p_partkey < 100000;

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 -- SORT_QUERY_RESULTS
 
 --1. test1

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_decimal.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_decimal.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_decimal.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_decimal.q Fri Nov  7 20:41:34 2014
@@ -1,19 +1,3 @@
-DROP TABLE IF EXISTS part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DECIMAL(6,2),
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
 
 -- 1. aggregate functions with decimal type
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_general_queries.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_general_queries.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_general_queries.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_general_queries.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 -- 1. testNoPTFNoWindowing
 select p_mfgr, p_name, p_size
 from part

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ptf_streaming.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ptf_streaming.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ptf_streaming.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ptf_streaming.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 create temporary function noopstreaming as 'org.apache.hadoop.hive.ql.udf.ptf.NoopStreaming$NoopStreamingResolver';
 
 --1. test1

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q Fri Nov  7 20:41:34 2014
@@ -1,8 +1,8 @@
 set hive.stats.dbclass=fs;
 set hive.stats.autogather=true;
 
-DROP TABLE lineitem;
-CREATE TABLE lineitem (L_ORDERKEY      INT,
+DROP TABLE IF EXISTS lineitem_ix;
+CREATE TABLE lineitem_ix (L_ORDERKEY      INT,
                                 L_PARTKEY       INT,
                                 L_SUPPKEY       INT,
                                 L_LINENUMBER    INT,
@@ -21,28 +21,28 @@ CREATE TABLE lineitem (L_ORDERKEY      I
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '|';
 
-LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
+LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix;
 
-CREATE INDEX lineitem_lshipdate_idx ON TABLE lineitem(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)");
-ALTER INDEX lineitem_lshipdate_idx ON lineitem REBUILD;
+CREATE INDEX lineitem_ix_lshipdate_idx ON TABLE lineitem_ix(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)");
+ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix REBUILD;
 
 explain select l_shipdate, count(l_shipdate)
-from lineitem
+from lineitem_ix
 group by l_shipdate;
 
 select l_shipdate, count(l_shipdate)
-from lineitem
+from lineitem_ix
 group by l_shipdate
 order by l_shipdate;
 
 set hive.optimize.index.groupby=true;
 
 explain select l_shipdate, count(l_shipdate)
-from lineitem
+from lineitem_ix
 group by l_shipdate;
 
 select l_shipdate, count(l_shipdate)
-from lineitem
+from lineitem_ix
 group by l_shipdate
 order by l_shipdate;
 
@@ -52,14 +52,14 @@ set hive.optimize.index.groupby=false;
 explain select year(l_shipdate) as year,
         month(l_shipdate) as month,
         count(l_shipdate) as monthly_shipments
-from lineitem
+from lineitem_ix
 group by year(l_shipdate), month(l_shipdate) 
 order by year, month;
 
 select year(l_shipdate) as year,
         month(l_shipdate) as month,
         count(l_shipdate) as monthly_shipments
-from lineitem
+from lineitem_ix
 group by year(l_shipdate), month(l_shipdate) 
 order by year, month;
 
@@ -68,14 +68,14 @@ set hive.optimize.index.groupby=true;
 explain select year(l_shipdate) as year,
         month(l_shipdate) as month,
         count(l_shipdate) as monthly_shipments
-from lineitem
+from lineitem_ix
 group by year(l_shipdate), month(l_shipdate) 
 order by year, month;
 
 select year(l_shipdate) as year,
         month(l_shipdate) as month,
         count(l_shipdate) as monthly_shipments
-from lineitem
+from lineitem_ix
 group by year(l_shipdate), month(l_shipdate) 
 order by year, month;
 
@@ -86,24 +86,24 @@ lastyear.monthly_shipments as monthly_sh
    from (select year(l_shipdate) as year,
                 month(l_shipdate) as month,
                 count(l_shipdate) as monthly_shipments
-           from lineitem
+           from lineitem_ix
           where year(l_shipdate) = 1997
           group by year(l_shipdate), month(l_shipdate)
         )  lastyear join
         (select year(l_shipdate) as year,
                 month(l_shipdate) as month,
                 count(l_shipdate) as monthly_shipments
-           from lineitem
+           from lineitem_ix
           where year(l_shipdate) = 1998
           group by year(l_shipdate), month(l_shipdate)
         )  thisyear
   on lastyear.month = thisyear.month;
 
 explain  select l_shipdate, cnt
-from (select l_shipdate, count(l_shipdate) as cnt from lineitem group by l_shipdate
+from (select l_shipdate, count(l_shipdate) as cnt from lineitem_ix group by l_shipdate
 union all
 select l_shipdate, l_orderkey as cnt
-from lineitem) dummy;
+from lineitem_ix) dummy;
 
 CREATE TABLE tbl(key int, value int);
 CREATE INDEX tbl_key_idx ON TABLE tbl(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)");

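ql_rewrite_gbtoidx.q is handled by rename rather than removal: it builds an aggregate index over its lineitem data, so it keeps a private copy under the new name lineitem_ix instead of clashing with the shared lineitem table, and the index plus every query reference are renamed in step. Condensed from the hunk above:

DROP TABLE IF EXISTS lineitem_ix;
CREATE TABLE lineitem_ix (L_ORDERKEY      INT,
                          L_PARTKEY       INT,
                          L_SUPPKEY       INT,
                          L_LINENUMBER    INT,
                          L_QUANTITY      DOUBLE,
                          L_EXTENDEDPRICE DOUBLE,
                          L_DISCOUNT      DOUBLE,
                          L_TAX           DOUBLE,
                          L_RETURNFLAG    STRING,
                          L_LINESTATUS    STRING,
                          l_shipdate      STRING,
                          L_COMMITDATE    STRING,
                          L_RECEIPTDATE   STRING,
                          L_SHIPINSTRUCT  STRING,
                          L_SHIPMODE      STRING,
                          L_COMMENT       STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '|';

LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem_ix;

-- The index rename keeps the group-by-to-index rewrite working against the new table:
CREATE INDEX lineitem_ix_lshipdate_idx ON TABLE lineitem_ix(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)");
ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix REBUILD;
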
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/reducesink_dedup.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/reducesink_dedup.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/reducesink_dedup.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/reducesink_dedup.q Fri Nov  7 20:41:34 2014
@@ -1,19 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-
 select p_name 
 from (select p_name from part distribute by 1 sort by 1) p 
 distribute by 1 sort by 1

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/stats_noscan_2.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/stats_noscan_2.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/stats_noscan_2.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/stats_noscan_2.q Fri Nov  7 20:41:34 2014
@@ -1,7 +1,8 @@
+dfs -cp ${system:hive.root}/data/files/ext_test ${system:test.tmp.dir}/analyze_external;
 
 -- test analyze table compute statistiscs [noscan] on external table 
 -- 1 test table
-CREATE EXTERNAL TABLE anaylyze_external (a INT) LOCATION '${system:hive.root}/data/files/ext_test';
+CREATE EXTERNAL TABLE anaylyze_external (a INT) LOCATION '${system:test.tmp.dir}/analyze_external';
 SELECT * FROM anaylyze_external;
 analyze table anaylyze_external compute statistics noscan;
 describe formatted anaylyze_external;

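stats_noscan_2.q gets a different treatment: the external table used to point straight at data files inside the source tree (${system:hive.root}/data/files/ext_test). The hunk first copies that directory into the per-run tmp dir and anchors the table there, so the test no longer touches the checked-in files, which is presumably the motivation for the copy (the anaylyze_external spelling is carried over from the original test). The resulting setup:

-- Stage the fixture under the test tmp dir, then mount the external table on the copy:
dfs -cp ${system:hive.root}/data/files/ext_test ${system:test.tmp.dir}/analyze_external;
CREATE EXTERNAL TABLE anaylyze_external (a INT) LOCATION '${system:test.tmp.dir}/analyze_external';
analyze table anaylyze_external compute statistics noscan;
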
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in.q Fri Nov  7 20:41:34 2014
@@ -1,44 +1,5 @@
 -- SORT_QUERY_RESULTS
 
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
-DROP TABLE lineitem;
-CREATE TABLE lineitem (L_ORDERKEY      INT,
-                                L_PARTKEY       INT,
-                                L_SUPPKEY       INT,
-                                L_LINENUMBER    INT,
-                                L_QUANTITY      DOUBLE,
-                                L_EXTENDEDPRICE DOUBLE,
-                                L_DISCOUNT      DOUBLE,
-                                L_TAX           DOUBLE,
-                                L_RETURNFLAG    STRING,
-                                L_LINESTATUS    STRING,
-                                l_shipdate      STRING,
-                                L_COMMITDATE    STRING,
-                                L_RECEIPTDATE   STRING,
-                                L_SHIPINSTRUCT  STRING,
-                                L_SHIPMODE      STRING,
-                                L_COMMENT       STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|';
-
-LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
-
 -- non agg, non corr
 explain
  select * 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_explain_rewrite.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_explain_rewrite.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_explain_rewrite.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_explain_rewrite.q Fri Nov  7 20:41:34 2014
@@ -1,39 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-DROP TABLE lineitem;
-CREATE TABLE lineitem (L_ORDERKEY      INT,
-                                L_PARTKEY       INT,
-                                L_SUPPKEY       INT,
-                                L_LINENUMBER    INT,
-                                L_QUANTITY      DOUBLE,
-                                L_EXTENDEDPRICE DOUBLE,
-                                L_DISCOUNT      DOUBLE,
-                                L_TAX           DOUBLE,
-                                L_RETURNFLAG    STRING,
-                                L_LINESTATUS    STRING,
-                                l_shipdate      STRING,
-                                L_COMMITDATE    STRING,
-                                L_RECEIPTDATE   STRING,
-                                L_SHIPINSTRUCT  STRING,
-                                L_SHIPMODE      STRING,
-                                L_COMMENT       STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|';
-
-
 -- non agg, non corr
 explain rewrite
  select * 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_having.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_having.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_having.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_in_having.q Fri Nov  7 20:41:34 2014
@@ -1,7 +1,9 @@
 -- SORT_QUERY_RESULTS
 
 -- data setup
-CREATE TABLE part( 
+DROP TABLE IF EXISTS part_subq;
+
+CREATE TABLE part_subq( 
     p_partkey INT,
     p_name STRING,
     p_mfgr STRING,
@@ -13,7 +15,7 @@ CREATE TABLE part( 
     p_comment STRING
 );
 
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part_subq;
 
 -- non agg, non corr
 explain
@@ -45,11 +47,11 @@ set hive.optimize.correlation=false;
 -- agg, non corr
 explain
 select p_mfgr, avg(p_size)
-from part b
+from part_subq b
 group by b.p_mfgr
 having b.p_mfgr in 
    (select p_mfgr 
-    from part
+    from part_subq
     group by p_mfgr
     having max(p_size) - min(p_size) < 20
    )
@@ -60,11 +62,11 @@ set hive.optimize.correlation=true;
 -- agg, non corr
 explain
 select p_mfgr, avg(p_size)
-from part b
+from part_subq b
 group by b.p_mfgr
 having b.p_mfgr in
    (select p_mfgr
-    from part
+    from part_subq
     group by p_mfgr
     having max(p_size) - min(p_size) < 20
    )
@@ -113,8 +115,10 @@ having count(*) in (select count(*) from
 -- non agg, non corr, windowing
 explain
 select p_mfgr, p_name, avg(p_size) 
-from part 
+from part_subq 
 group by p_mfgr, p_name
 having p_name in 
-  (select first_value(p_name) over(partition by p_mfgr order by p_size) from part)
+  (select first_value(p_name) over(partition by p_mfgr order by p_size) from part_subq)
 ;
+
+DROP TABLE part_subq;
\ No newline at end of file

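subquery_in_having.q uses the same rename strategy: its local table becomes part_subq, guarded by DROP TABLE IF EXISTS up front and cleaned up with a DROP at the end, with every query reference updated to match; vectorized_ptf.q further down does likewise with its ORC copy, renaming part to part_orc. The shape of the pattern:

DROP TABLE IF EXISTS part_subq;
CREATE TABLE part_subq(
    p_partkey INT, p_name STRING, p_mfgr STRING, p_brand STRING,
    p_type STRING, p_size INT, p_container STRING,
    p_retailprice DOUBLE, p_comment STRING
);
LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part_subq;

-- ... test queries run against part_subq instead of part ...

DROP TABLE part_subq;
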
Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin.q Fri Nov  7 20:41:34 2014
@@ -1,42 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
-DROP TABLE lineitem;
-CREATE TABLE lineitem (L_ORDERKEY      INT,
-                                L_PARTKEY       INT,
-                                L_SUPPKEY       INT,
-                                L_LINENUMBER    INT,
-                                L_QUANTITY      DOUBLE,
-                                L_EXTENDEDPRICE DOUBLE,
-                                L_DISCOUNT      DOUBLE,
-                                L_TAX           DOUBLE,
-                                L_RETURNFLAG    STRING,
-                                L_LINESTATUS    STRING,
-                                l_shipdate      STRING,
-                                L_COMMITDATE    STRING,
-                                L_RECEIPTDATE   STRING,
-                                L_SHIPINSTRUCT  STRING,
-                                L_SHIPMODE      STRING,
-                                L_COMMENT       STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|';
-
-LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
-
 -- non agg, non corr
 explain
 select * 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_notin_having.q Fri Nov  7 20:41:34 2014
@@ -1,21 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
-
 -- non agg, non corr
 explain
 select key, count(*) 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q Fri Nov  7 20:41:34 2014
@@ -1,18 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
 create table src11 (key1 string, value1 string);
 
 create table part2( 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/temp_table_windowing_expressions.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TEMPORARY TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 drop table over10k;
 
 create temporary table over10k(

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q Fri Nov  7 20:41:34 2014
@@ -3,45 +3,6 @@ SET hive.auto.convert.join=true;
 
 -- SORT_QUERY_RESULTS
 
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
-DROP TABLE lineitem;
-CREATE TABLE lineitem (L_ORDERKEY      INT,
-                                L_PARTKEY       INT,
-                                L_SUPPKEY       INT,
-                                L_LINENUMBER    INT,
-                                L_QUANTITY      DOUBLE,
-                                L_EXTENDEDPRICE DOUBLE,
-                                L_DISCOUNT      DOUBLE,
-                                L_TAX           DOUBLE,
-                                L_RETURNFLAG    STRING,
-                                L_LINESTATUS    STRING,
-                                l_shipdate      STRING,
-                                L_COMMITDATE    STRING,
-                                L_RECEIPTDATE   STRING,
-                                L_SHIPINSTRUCT  STRING,
-                                L_SHIPMODE      STRING,
-                                L_COMMENT       STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|';
-
-LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
-
 -- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer.
 -- Query copied from subquery_in.q
 

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_ptf.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_ptf.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_ptf.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/vectorized_ptf.q Fri Nov  7 20:41:34 2014
@@ -1,7 +1,7 @@
 SET hive.vectorized.execution.enabled=true;
 
 DROP TABLE part_staging;
-DROP TABLE part;
+DROP TABLE part_orc;
 
 -- NOTE: This test is a copy of ptf.
 -- NOTE: We cannot vectorize "pure" table functions (e.g. NOOP) -- given their blackbox nature. So only queries without table functions and
@@ -22,7 +22,7 @@ CREATE TABLE part_staging( 
 
 LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part_staging;
 
-CREATE TABLE part( 
+CREATE TABLE part_orc( 
     p_partkey INT,
     p_name STRING,
     p_mfgr STRING,
@@ -34,9 +34,9 @@ CREATE TABLE part( 
     p_comment STRING
 ) STORED AS ORC;
 
-DESCRIBE EXTENDED part;
+DESCRIBE EXTENDED part_orc;
 
-insert into table part select * from part_staging;
+insert into table part_orc select * from part_staging;
 
 --1. test1
 
@@ -45,7 +45,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part 
+from noop(on part_orc 
   partition by p_mfgr
   order by p_name
   );
@@ -54,7 +54,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part 
+from noop(on part_orc 
   partition by p_mfgr
   order by p_name
   );
@@ -64,14 +64,14 @@ from noop(on part 
 explain extended
 select p_mfgr, p_name,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
 distribute by j.p_mfgr
 sort by j.p_name)
 ;
 
 select p_mfgr, p_name,
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
-from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j
 distribute by j.p_mfgr
 sort by j.p_name)
 ;    
@@ -80,12 +80,12 @@ sort by j.p_name)
 
 explain extended
 select p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name);
 
 select p_mfgr, p_name, p_size
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name);
 
@@ -96,7 +96,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part 
+from noop(on part_orc 
   partition by p_mfgr
   order by p_name
   ) abc;
@@ -105,7 +105,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part 
+from noop(on part_orc 
   partition by p_mfgr
   order by p_name
   ) abc;
@@ -117,7 +117,7 @@ select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz 
-from noop(on part 
+from noop(on part_orc 
           partition by p_mfgr 
           order by p_name 
           ) 
@@ -127,7 +127,7 @@ select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz 
-from noop(on part 
+from noop(on part_orc 
           partition by p_mfgr 
           order by p_name 
           ) 
@@ -140,7 +140,7 @@ select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz 
-from noop(on part 
+from noop(on part_orc 
           partition by p_mfgr 
           order by p_name 
           ) 
@@ -151,7 +151,7 @@ select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
 p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz 
-from noop(on part 
+from noop(on part_orc 
           partition by p_mfgr 
           order by p_name 
           ) 
@@ -162,28 +162,28 @@ group by p_mfgr, p_name, p_size  
 
 explain extended
 select abc.* 
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
-) abc join part p1 on abc.p_partkey = p1.p_partkey;
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey;
 
 select abc.* 
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
-) abc join part p1 on abc.p_partkey = p1.p_partkey;
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey;
 
 -- 8. testJoinRight
 
 explain extended
 select abc.* 
-from part p1 join noop(on part 
+from part_orc p1 join noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
 ) abc on abc.p_partkey = p1.p_partkey;
 
 select abc.* 
-from part p1 join noop(on part 
+from part_orc p1 join noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
 ) abc on abc.p_partkey = p1.p_partkey;
@@ -193,13 +193,13 @@ order by p_name 
 explain extended
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name, p_size desc);
 
 select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name, p_size desc) as r
-from noopwithmap(on part
+from noopwithmap(on part_orc
 partition by p_mfgr
 order by p_name, p_size desc);
 
@@ -210,7 +210,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noopwithmap(on part 
+from noopwithmap(on part_orc 
   partition by p_mfgr
   order by p_name);
   
@@ -218,7 +218,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noopwithmap(on part 
+from noopwithmap(on part_orc 
   partition by p_mfgr
   order by p_name);
   
@@ -229,7 +229,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 ;
@@ -238,7 +238,7 @@ select p_mfgr, p_name, p_size,
 rank() over (partition by p_mfgr order by p_name) as r,
 dense_rank() over (partition by p_mfgr order by p_name) as dr,
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
-from noop(on part
+from noop(on part_orc
 partition by p_mfgr
 order by p_name)
 ;
@@ -250,7 +250,7 @@ select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
-from noop(on noopwithmap(on noop(on part 
+from noop(on noopwithmap(on noop(on part_orc 
 partition by p_mfgr 
 order by p_mfgr, p_name
 )));
@@ -259,7 +259,7 @@ select p_mfgr, p_name, p_size, 
 rank() over (partition by p_mfgr order by p_name) as r, 
 dense_rank() over (partition by p_mfgr order by p_name) as dr, 
 sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row)  as s1
-from noop(on noopwithmap(on noop(on part 
+from noop(on noopwithmap(on noop(on part_orc 
 partition by p_mfgr 
 order by p_mfgr, p_name
 )));
@@ -273,7 +273,7 @@ from (select p_mfgr, p_name, 
 count(p_size) over (partition by p_mfgr order by p_name) as cd, 
 p_retailprice, 
 sum(p_retailprice) over w1  as s1
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name) 
 window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) 
@@ -285,7 +285,7 @@ from (select p_mfgr, p_name, 
 count(p_size) over (partition by p_mfgr order by p_name) as cd, 
 p_retailprice, 
 sum(p_retailprice) over w1  as s1
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name) 
 window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) 
@@ -300,10 +300,10 @@ dense_rank() over (distribute by abc.p_m
 count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, 
 abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, 
 abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz 
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
-) abc join part p1 on abc.p_partkey = p1.p_partkey 
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey 
 ;
 
 select abc.p_mfgr, abc.p_name, 
@@ -312,22 +312,22 @@ dense_rank() over (distribute by abc.p_m
 count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd, 
 abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, 
 abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz 
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name 
-) abc join part p1 on abc.p_partkey = p1.p_partkey 
+) abc join part_orc p1 on abc.p_partkey = p1.p_partkey 
 ;
 
 -- 15. testDistinctInSelectWithPTF
 
 explain extended
 select DISTINCT p_mfgr, p_name, p_size 
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name);
 
 select DISTINCT p_mfgr, p_name, p_size 
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name);
 
@@ -336,7 +336,7 @@ order by p_name);
 create view IF NOT EXISTS mfgr_price_view as 
 select p_mfgr, p_brand, 
 sum(p_retailprice) as s 
-from part 
+from part_orc 
 group by p_mfgr, p_brand;
 
 explain extended
@@ -374,7 +374,7 @@ cud DOUBLE, 
 fv1 INT);
 
 explain extended
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name) 
 INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, 
@@ -389,7 +389,7 @@ cume_dist() over (distribute by p_mfgr s
 first_value(p_size, true) over w1  as fv1
 window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 preceding and 2 following);
 
-from noop(on part 
+from noop(on part_orc 
 partition by p_mfgr 
 order by p_name) 
 INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, 
@@ -418,7 +418,7 @@ p_size, sum(p_size) over (partition by p
 from noop(on 
         noopwithmap(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr 
               order by p_mfgr) 
             ) 
@@ -434,7 +434,7 @@ p_size, sum(p_size) over (partition by p
 from noop(on 
         noopwithmap(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr 
               order by p_mfgr) 
             ) 
@@ -453,7 +453,7 @@ p_size, sum(p_size) over (partition by p
 from noop(on 
         noop(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr 
               order by p_mfgr) 
             ) 
@@ -469,7 +469,7 @@ p_size, sum(p_size) over (partition by p
 from noop(on 
         noop(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr 
               order by p_mfgr) 
             ) 
@@ -488,7 +488,7 @@ p_size, sum(p_size) over (partition by p
 from noop(on 
         noop(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr,p_name 
               order by p_mfgr,p_name) 
             ) 
@@ -502,7 +502,7 @@ p_size, sum(p_size) over (partition by p
 from noop(on 
         noop(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr,p_name 
               order by p_mfgr,p_name) 
             ) 
@@ -520,7 +520,7 @@ p_size, sum(p_size) over (partition by p
 from noopwithmap(on 
         noop(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr,p_name 
               order by p_mfgr,p_name) 
             ) 
@@ -536,7 +536,7 @@ p_size, sum(p_size) over (partition by p
 from noopwithmap(on 
         noop(on 
           noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr,p_name 
               order by p_mfgr,p_name) 
             ) 
@@ -556,7 +556,7 @@ sum(p_size) over (partition by p_mfgr,p_
 sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row)  as s2
 from noop(on 
         noopwithmap(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr, p_name 
               order by p_mfgr, p_name) 
           partition by p_mfgr 
@@ -571,7 +571,7 @@ sum(p_size) over (partition by p_mfgr,p_
 sum(p_size) over (partition by p_mfgr,p_name order by p_mfgr,p_name rows between unbounded preceding and current row)  as s2
 from noop(on 
         noopwithmap(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr, p_name 
               order by p_mfgr, p_name) 
           partition by p_mfgr 
@@ -589,7 +589,7 @@ sum(p_size) over (partition by p_mfgr or
 sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row)  as s2
 from noopwithmap(on 
         noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr, p_name 
               order by p_mfgr, p_name) 
           ));
@@ -602,7 +602,7 @@ sum(p_size) over (partition by p_mfgr or
 sum(p_size) over (partition by p_mfgr order by p_name range between unbounded preceding and current row)  as s2
 from noopwithmap(on 
         noop(on 
-              noop(on part 
+              noop(on part_orc 
               partition by p_mfgr, p_name 
               order by p_mfgr, p_name) 
           ));
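
The hunks above retarget every PTF query in this file from part to part_orc. A minimal sketch of how such a table could be materialized, assuming the harness already provides the part table (the actual setup is not part of this patch):

create table part_orc stored as orc as
select * from part;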

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 -- 1. testWindowing
 select p_mfgr, p_name, p_size,
 rank() over(distribute by p_mfgr sort by p_name) as r,
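
From here on, each windowing .q file sheds the same boilerplate: the per-file DROP/CREATE/LOAD of part disappears, so the queries run against a table the test harness creates once up front. A hedged sketch of that one-time setup, mirroring the deleted statements (where this DDL actually lives is an assumption):

CREATE TABLE part(
    p_partkey INT,
    p_name STRING,
    p_mfgr STRING,
    p_brand STRING,
    p_type STRING,
    p_size INT,
    p_container STRING,
    p_retailprice DOUBLE,
    p_comment STRING
);

LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' OVERWRITE INTO TABLE part;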

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 set hive.join.cache.size=1;
 
 select p_mfgr, p_name, p_size,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing_columnPruning.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing_columnPruning.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing_columnPruning.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing_columnPruning.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 -- 1. testQueryLevelPartitionColsNotInSelect
 select p_size,
 sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing_decimal.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing_decimal.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing_decimal.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing_decimal.q Fri Nov  7 20:41:34 2014
@@ -1,21 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
-
 create table part_dec(
     p_partkey INT,
     p_name STRING,

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing_expressions.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing_expressions.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing_expressions.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing_expressions.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 drop table over10k;
 
 create table over10k(

Modified: hive/branches/spark/ql/src/test/queries/clientpositive/windowing_streaming.q
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/queries/clientpositive/windowing_streaming.q?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/queries/clientpositive/windowing_streaming.q (original)
+++ hive/branches/spark/ql/src/test/queries/clientpositive/windowing_streaming.q Fri Nov  7 20:41:34 2014
@@ -1,20 +1,3 @@
-DROP TABLE if exists part;
-
--- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-);
-
-LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-
 drop table over10k;
 
 create table over10k(

Modified: hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/limit_partition_stats.q.out Fri Nov  7 20:41:34 2014
@@ -2,102 +2,7 @@ PREHOOK: query: create table part (c int
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@part
-POSTHOOK: query: create table part (c int) partitioned by (d string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-PREHOOK: query: insert into table part partition (d)
-select hr,ds from srcpart
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@part
-POSTHOOK: query: insert into table part partition (d)
-select hr,ds from srcpart
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@part@d=2008-04-08
-POSTHOOK: Output: default@part@d=2008-04-09
-POSTHOOK: Lineage: part PARTITION(d=2008-04-08).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
-POSTHOOK: Lineage: part PARTITION(d=2008-04-09).c EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
-PREHOOK: query: explain select count(*) from part
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from part
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select count(*) from part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
+FAILED: Hive Internal Error: java.lang.RuntimeException(Cannot overwrite read-only table: part)
+java.lang.RuntimeException: Cannot overwrite read-only table: part
 #### A masked pattern was here ####
-POSTHOOK: query: select count(*) from part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-2000
-PREHOOK: query: explain select count(*) from part
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from part
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: part
-            Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: bigint)
-            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
 
-FAILED: SemanticException Number of partitions scanned (=2) on table part exceeds limit (=1). This is controlled by hive.limit.query.max.table.partition.

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q.out Fri Nov  7 20:41:34 2014
@@ -1,41 +1 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@part
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@part
-FAILED: SemanticException [Error 10025]: Line 4:7 Expression not in GROUP BY key 'p_mfgr'
+FAILED: SemanticException [Error 10025]: Line 2:7 Expression not in GROUP BY key 'p_mfgr'

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AmbiguousWindowDefn.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AmbiguousWindowDefn.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AmbiguousWindowDefn.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_AmbiguousWindowDefn.q.out Fri Nov  7 20:41:34 2014
@@ -1,41 +1 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@part
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@part
 FAILED: SemanticException Cycle in Window references [w3, w3]

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DistributeByOrderBy.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DistributeByOrderBy.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DistributeByOrderBy.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DistributeByOrderBy.q.out Fri Nov  7 20:41:34 2014
@@ -1,34 +1,2 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-FAILED: ParseException line 5:46 missing ) at 'order' near 'p_mfgr'
-line 5:61 missing EOF at ')' near 'p_mfgr'
+FAILED: ParseException line 3:46 missing ) at 'order' near 'p_mfgr'
+line 3:61 missing EOF at ')' near 'p_mfgr'

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DuplicateWindowAlias.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DuplicateWindowAlias.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DuplicateWindowAlias.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_DuplicateWindowAlias.q.out Fri Nov  7 20:41:34 2014
@@ -1,33 +1 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-FAILED: SemanticException 10:7 Duplicate definition of window w2 is not allowed. Error encountered near token 'w2'
+FAILED: SemanticException 8:7 Duplicate definition of window w2 is not allowed. Error encountered near token 'w2'

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithNoGBYNoWindowing.q.out Fri Nov  7 20:41:34 2014
@@ -1,33 +1 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
 FAILED: SemanticException HAVING specified without GROUP BY

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithPTF.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithPTF.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithPTF.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_HavingLeadWithPTF.q.out Fri Nov  7 20:41:34 2014
@@ -1,33 +1 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
 FAILED: SemanticException HAVING specified without GROUP BY

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_InvalidValueBoundary.q.out Fri Nov  7 20:41:34 2014
@@ -1,36 +1,2 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING,
-    p_complex array<int>
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING,
-    p_complex array<int>
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
 FAILED: SemanticException Failed to breakup Windowing invocations into Groups. At least 1 group must only depend on input columns. Also check for circular dependencies.
-Underlying error: Value Boundary expression must be of primitve type. Found: array<int>
+Underlying error: org.apache.hadoop.hive.ql.parse.SemanticException: Line 6:43 Invalid table alias or column reference 'p_complex': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment)

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_JoinWithAmbigousAlias.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_JoinWithAmbigousAlias.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_JoinWithAmbigousAlias.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_JoinWithAmbigousAlias.q.out Fri Nov  7 20:41:34 2014
@@ -1,33 +1 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-FAILED: SemanticException [Error 10008]: Line 8:11 Ambiguous table alias 'part'
+FAILED: SemanticException [Error 10008]: Line 6:11 Ambiguous table alias 'part'

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_PartitionBySortBy.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_PartitionBySortBy.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_PartitionBySortBy.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_PartitionBySortBy.q.out Fri Nov  7 20:41:34 2014
@@ -1,34 +1,2 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-FAILED: ParseException line 5:45 missing ) at 'sort' near 'p_mfgr'
-line 5:59 missing EOF at ')' near 'p_mfgr'
+FAILED: ParseException line 3:45 missing ) at 'sort' near 'p_mfgr'
+line 3:59 missing EOF at ')' near 'p_mfgr'

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_WhereWithRankCond.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_WhereWithRankCond.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_WhereWithRankCond.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_negative_WhereWithRankCond.q.out Fri Nov  7 20:41:34 2014
@@ -1,33 +1 @@
-PREHOOK: query: DROP TABLE part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-FAILED: SemanticException [Error 10004]: Line 7:6 Invalid table alias or column reference 'r': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment)
+FAILED: SemanticException [Error 10004]: Line 5:6 Invalid table alias or column reference 'r': (possible column names are: p_partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment)

Modified: hive/branches/spark/ql/src/test/results/clientnegative/ptf_window_boundaries.q.out
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/results/clientnegative/ptf_window_boundaries.q.out?rev=1637444&r1=1637443&r2=1637444&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/results/clientnegative/ptf_window_boundaries.q.out (original)
+++ hive/branches/spark/ql/src/test/results/clientnegative/ptf_window_boundaries.q.out Fri Nov  7 20:41:34 2014
@@ -1,31 +1 @@
-PREHOOK: query: -- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@part
-POSTHOOK: query: -- data setup
-CREATE TABLE part( 
-    p_partkey INT,
-    p_name STRING,
-    p_mfgr STRING,
-    p_brand STRING,
-    p_type STRING,
-    p_size INT,
-    p_container STRING,
-    p_retailprice DOUBLE,
-    p_comment STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@part
-FAILED: ParseException line 4:44 mismatched input 'following' expecting KW_PRECEDING near 'unbounded' in windowframestartboundary
+FAILED: ParseException line 2:44 mismatched input 'following' expecting KW_PRECEDING near 'unbounded' in windowframestartboundary