Posted to commits@hive.apache.org by jc...@apache.org on 2018/05/21 20:25:00 UTC

[23/51] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)
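
The change is one mechanical pattern applied throughout: each test table name gains a unique numeric suffix (src_multi1 becomes src_multi1_n4, intermediate becomes intermediate_n3, and so on), so q-files that reuse a common table name no longer collide when executed in the same test batch. A minimal HiveQL sketch of the before/after shape, using statements taken verbatim from the first hunk below:

-- before: table name shared by several q-files, so batched runs collide
create table src_multi1 like src;

-- after: per-file numeric suffix keeps the name unique within a batch
create table src_multi1_n4 like src;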

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
index 670211c..9421171 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
@@ -3,23 +3,23 @@ set hive.multi.insert.move.tasks.share.dependencies=true;
 set hive.stats.dbclass=fs;
 -- SORT_QUERY_RESULTS
 
-create table src_multi1 like src;
-create table src_multi2 like src;
+create table src_multi1_n4 like src;
+create table src_multi2_n5 like src;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 set hive.merge.mapfiles=true;
@@ -27,45 +27,45 @@ set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 
@@ -74,15 +74,15 @@ set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 set hive.merge.mapfiles=false;
@@ -90,30 +90,30 @@ set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 set hive.merge.mapfiles=true;
@@ -121,15 +121,15 @@ set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 group by key, value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 
@@ -139,60 +139,60 @@ set hive.merge.mapredfiles=false;
 
 explain
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=false;
 
 explain
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=true;
 
 explain
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 
 explain
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from (select * from src  union all select * from src) s
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 
@@ -333,19 +333,19 @@ set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;
@@ -355,19 +355,19 @@ set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;
@@ -377,19 +377,19 @@ set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;
@@ -399,19 +399,19 @@ set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/1' select * where key < 10 group by key, value cluster by key
 insert overwrite local directory '${system:test.tmp.dir}/hive_test/multiins_local/2' select * where key > 10 and key < 20 group by key, value cluster by value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 dfs -ls ${system:test.tmp.dir}/hive_test/multiins_local;
 dfs -rmr ${system:test.tmp.dir}/hive_test/multiins_local;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/multi_insert_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_partitioned.q b/ql/src/test/queries/clientpositive/multi_insert_partitioned.q
index 0d78c72..d818613 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_partitioned.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_partitioned.q
@@ -4,46 +4,46 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
-drop table intermediate;
+drop table intermediate_n3;
 
-create table intermediate(key int) partitioned by (p int) stored as orc;
-insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2;
-insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2;
-insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2;
+create table intermediate_n3(key int) partitioned by (p int) stored as orc;
+insert into table intermediate_n3 partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2;
+insert into table intermediate_n3 partition(p='456') select distinct key from src where key is not null order by key asc limit 2;
+insert into table intermediate_n3 partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2;
 
 drop table multi_partitioned;
 
 create table multi_partitioned (key int, key2 int) partitioned by (p int);
 
-from intermediate
+from intermediate_n3
 insert into table multi_partitioned partition(p=1) select p, key
 insert into table multi_partitioned partition(p=2) select key, p;
 
 select * from multi_partitioned order by key, key2, p;
 desc formatted multi_partitioned;
 
-from intermediate
+from intermediate_n3
 insert overwrite table multi_partitioned partition(p=2) select p, key
 insert overwrite table multi_partitioned partition(p=1) select key, p;
 
 select * from multi_partitioned order by key, key2, p;
 desc formatted multi_partitioned;
 
-from intermediate
+from intermediate_n3
 insert into table multi_partitioned partition(p=2) select p, key
 insert overwrite table multi_partitioned partition(p=1) select key, p;
 
 select * from multi_partitioned order by key, key2, p;
 desc formatted multi_partitioned;
 
-from intermediate
+from intermediate_n3
 insert into table multi_partitioned partition(p) select p, key, p
 insert into table multi_partitioned partition(p=1) select key, p;
 
 select key, key2, p from multi_partitioned order by key, key2, p;
 desc formatted multi_partitioned;
 
-from intermediate
+from intermediate_n3
 insert into table multi_partitioned partition(p) select p, key, 1
 insert into table multi_partitioned partition(p=1) select key, p;
 
@@ -52,6 +52,6 @@ desc formatted multi_partitioned;
 
 drop table multi_partitioned;
 
-drop table intermediate;
+drop table intermediate_n3;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/multi_insert_union_src.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multi_insert_union_src.q b/ql/src/test/queries/clientpositive/multi_insert_union_src.q
index ae4c8d4..4776e7e 100644
--- a/ql/src/test/queries/clientpositive/multi_insert_union_src.q
+++ b/ql/src/test/queries/clientpositive/multi_insert_union_src.q
@@ -1,23 +1,23 @@
 --! qt:dataset:src1
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-drop table if exists src2;
-drop table if exists src_multi1;
-drop table if exists src_multi2;
+drop table if exists src2_n4;
+drop table if exists src_multi1_n3;
+drop table if exists src_multi2_n4;
 set hive.stats.dbclass=fs;
-CREATE TABLE src2 as SELECT * FROM src;
+CREATE TABLE src2_n4 as SELECT * FROM src;
 
-create table src_multi1 like src;
-create table src_multi2 like src;
+create table src_multi1_n3 like src;
+create table src_multi2_n4 like src;
 
 explain
-from (select * from src1 where key < 10 union all select * from src2 where key > 100) s
-insert overwrite table src_multi1 select key, value where key < 150 order by key
-insert overwrite table src_multi2 select key, value where key > 400 order by value;
+from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s
+insert overwrite table src_multi1_n3 select key, value where key < 150 order by key
+insert overwrite table src_multi2_n4 select key, value where key > 400 order by value;
 
-from (select * from src1 where key < 10 union all select * from src2 where key > 100) s
-insert overwrite table src_multi1 select key, value where key < 150 order by key
-insert overwrite table src_multi2 select key, value where key > 400 order by value;
+from (select * from src1 where key < 10 union all select * from src2_n4 where key > 100) s
+insert overwrite table src_multi1_n3 select key, value where key < 150 order by key
+insert overwrite table src_multi2_n4 select key, value where key > 400 order by value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n3;
+select * from src_multi2_n4;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/multigroupby_singlemr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/multigroupby_singlemr.q b/ql/src/test/queries/clientpositive/multigroupby_singlemr.q
index a4e86ad..a5f6fea 100644
--- a/ql/src/test/queries/clientpositive/multigroupby_singlemr.q
+++ b/ql/src/test/queries/clientpositive/multigroupby_singlemr.q
@@ -1,33 +1,33 @@
-CREATE TABLE TBL(C1 INT, C2 INT, C3 INT, C4 INT);
+CREATE TABLE TBL_n0(C1 INT, C2 INT, C3 INT, C4 INT);
 
-CREATE TABLE DEST1(d1 INT, d2 INT) STORED AS TEXTFILE;
-CREATE TABLE DEST2(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE;
-CREATE TABLE DEST3(d1 INT, d2 INT, d3 INT, d4 INT) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n116(d1 INT, d2 INT) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n30(d1 INT, d2 INT, d3 INT) STORED AS TEXTFILE;
+CREATE TABLE DEST3_n4(d1 INT, d2 INT, d3 INT, d4 INT) STORED AS TEXTFILE;
 CREATE TABLE DEST4(d1 INT, d2 INT, d3 INT, d4 INT) STORED AS TEXTFILE;
 
 EXPLAIN
-FROM TBL
-INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1
-INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2;
+FROM TBL_n0
+INSERT OVERWRITE TABLE DEST1_n116 SELECT TBL_n0.C1, COUNT(TBL_n0.C2) GROUP BY TBL_n0.C1
+INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C1, TBL_n0.C2;
 
 EXPLAIN
-FROM TBL
-INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1
-INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C2, TBL.C1;
+FROM TBL_n0
+INSERT OVERWRITE TABLE DEST1_n116 SELECT TBL_n0.C1, COUNT(TBL_n0.C2) GROUP BY TBL_n0.C1
+INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C2, TBL_n0.C1;
 
 EXPLAIN
-FROM TBL
-INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3
-INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2;
+FROM TBL_n0
+INSERT OVERWRITE TABLE DEST3_n4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C2, TBL_n0.C3
+INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C1, TBL_n0.C2;
 
 EXPLAIN
-FROM TBL
-INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3
-INSERT OVERWRITE TABLE DEST4 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C3, TBL.C2;
+FROM TBL_n0
+INSERT OVERWRITE TABLE DEST3_n4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C2, TBL_n0.C3
+INSERT OVERWRITE TABLE DEST4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C3, TBL_n0.C2;
 
 
 EXPLAIN
-FROM TBL
-INSERT OVERWRITE TABLE DEST3 SELECT TBL.C1, TBL.C2, TBL.C3, COUNT(TBL.C4) GROUP BY TBL.C1, TBL.C2, TBL.C3
-INSERT OVERWRITE TABLE DEST2 SELECT TBL.C1, TBL.C2, COUNT(TBL.C3) GROUP BY TBL.C1, TBL.C2
-INSERT OVERWRITE TABLE DEST1 SELECT TBL.C1, COUNT(TBL.C2) GROUP BY TBL.C1;
+FROM TBL_n0
+INSERT OVERWRITE TABLE DEST3_n4 SELECT TBL_n0.C1, TBL_n0.C2, TBL_n0.C3, COUNT(TBL_n0.C4) GROUP BY TBL_n0.C1, TBL_n0.C2, TBL_n0.C3
+INSERT OVERWRITE TABLE DEST2_n30 SELECT TBL_n0.C1, TBL_n0.C2, COUNT(TBL_n0.C3) GROUP BY TBL_n0.C1, TBL_n0.C2
+INSERT OVERWRITE TABLE DEST1_n116 SELECT TBL_n0.C1, COUNT(TBL_n0.C2) GROUP BY TBL_n0.C1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/named_column_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/named_column_join.q b/ql/src/test/queries/clientpositive/named_column_join.q
index 6de9325..7a0cc58 100644
--- a/ql/src/test/queries/clientpositive/named_column_join.q
+++ b/ql/src/test/queries/clientpositive/named_column_join.q
@@ -1,52 +1,52 @@
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-create table t (a int, b int);
+create table t_n8 (a int, b int);
 
-insert into t values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null);
+insert into t_n8 values (1,2),(2,1),(3,4),(4,3),(3,3),(null,null),(null,1),(2,null);
 
-explain select * from t t1 join t t2 using (a);
+explain select * from t_n8 t1 join t_n8 t2 using (a);
 
-select * from t t1 join t t2 using (a);
+select * from t_n8 t1 join t_n8 t2 using (a);
 
-select * from t t1 join t t2 using (a,b);
+select * from t_n8 t1 join t_n8 t2 using (a,b);
 
-select t1.a,t2.b,t1.b,t2.a from t t1 join t t2 using (a);
+select t1.a,t2.b,t1.b,t2.a from t_n8 t1 join t_n8 t2 using (a);
 
-select * from t t1 left outer join t t2 using (a,b);
+select * from t_n8 t1 left outer join t_n8 t2 using (a,b);
 
-select t1.a,t1.b from t t1 right outer join t t2 on (t1.a=t2.a and t1.b=t2.b);
+select t1.a,t1.b from t_n8 t1 right outer join t_n8 t2 on (t1.a=t2.a and t1.b=t2.b);
 
-select * from t t1 right outer join t t2 using (a,b);
+select * from t_n8 t1 right outer join t_n8 t2 using (a,b);
 
-select * from t t1 inner join t t2 using (a,b);
+select * from t_n8 t1 inner join t_n8 t2 using (a,b);
 
-select * from t t1 left outer join t t2 using (b);
+select * from t_n8 t1 left outer join t_n8 t2 using (b);
 
-select * from t t1 right outer join t t2 using (b);
+select * from t_n8 t1 right outer join t_n8 t2 using (b);
 
-select * from t t1 inner join t t2 using (b);
+select * from t_n8 t1 inner join t_n8 t2 using (b);
 
-drop view v;
+drop view v_n4;
 
-create view v as select * from t t1 join t t2 using (a,b);
+create view v_n4 as select * from t_n8 t1 join t_n8 t2 using (a,b);
 
-desc formatted v;
+desc formatted v_n4;
 
-select * from v;
+select * from v_n4;
 
-drop view v;
+drop view v_n4;
 
-create view v as select * from t t1 right outer join t t2 using (b,a);
+create view v_n4 as select * from t_n8 t1 right outer join t_n8 t2 using (b,a);
 
-desc formatted v;
+desc formatted v_n4;
 
-select * from v;
+select * from v_n4;
 
-select * from (select t1.b b from t t1 inner join t t2 using (b)) t3 join t t4 using(b);
+select * from (select t1.b b from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(b);
 
-select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a);
+select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a);
 
-create table tt as select * from (select t2.a a from t t1 inner join t t2 using (b)) t3 join t t4 using(a);
+create table tt_n0 as select * from (select t2.a a from t_n8 t1 inner join t_n8 t2 using (b)) t3 join t_n8 t4 using(a);
 
-desc formatted tt;
+desc formatted tt_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/nested_column_pruning.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/nested_column_pruning.q b/ql/src/test/queries/clientpositive/nested_column_pruning.q
index ab5e16b..5b41f18 100644
--- a/ql/src/test/queries/clientpositive/nested_column_pruning.q
+++ b/ql/src/test/queries/clientpositive/nested_column_pruning.q
@@ -7,12 +7,12 @@ set hive.exec.dynamic.partition.mode = nonstrict;
 set hive.strict.checks.cartesian.product=false;
 
 -- First, create source tables
-DROP TABLE IF EXISTS dummy;
-CREATE TABLE dummy (i int);
-INSERT INTO TABLE dummy VALUES (42);
+DROP TABLE IF EXISTS dummy_n5;
+CREATE TABLE dummy_n5 (i int);
+INSERT INTO TABLE dummy_n5 VALUES (42);
 
-DROP TABLE IF EXISTS nested_tbl_1;
-CREATE TABLE nested_tbl_1 (
+DROP TABLE IF EXISTS nested_tbl_1_n1;
+CREATE TABLE nested_tbl_1_n1 (
   a int,
   s1 struct<f1: boolean, f2: string, f3: struct<f4: int, f5: double>, f6: int>,
   s2 struct<f7: string, f8: struct<f9 : boolean, f10: array<int>, f11: map<string, boolean>>>,
@@ -22,7 +22,7 @@ CREATE TABLE nested_tbl_1 (
   s6 map<string, struct<f20:array<struct<f21:struct<f22:int>>>>>
 ) STORED AS PARQUET;
 
-INSERT INTO TABLE nested_tbl_1 SELECT
+INSERT INTO TABLE nested_tbl_1_n1 SELECT
   1, named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4),
   named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))),
   named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))),
@@ -30,12 +30,12 @@ INSERT INTO TABLE nested_tbl_1 SELECT
   named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))),
   map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))),
       'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2)))))
-FROM dummy;
+FROM dummy_n5;
 
-DROP TABLE IF EXISTS nested_tbl_2;
-CREATE TABLE nested_tbl_2 LIKE nested_tbl_1;
+DROP TABLE IF EXISTS nested_tbl_2_n1;
+CREATE TABLE nested_tbl_2_n1 LIKE nested_tbl_1_n1;
 
-INSERT INTO TABLE nested_tbl_2 SELECT
+INSERT INTO TABLE nested_tbl_2_n1 SELECT
   2, named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4),
   named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))),
   named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))),
@@ -43,175 +43,175 @@ INSERT INTO TABLE nested_tbl_2 SELECT
   named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))),
   map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))),
       'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4)))))
-FROM dummy;
+FROM dummy_n5;
 
 -- Testing only select statements
 
-EXPLAIN SELECT a FROM nested_tbl_1;
-SELECT a FROM nested_tbl_1;
+EXPLAIN SELECT a FROM nested_tbl_1_n1;
+SELECT a FROM nested_tbl_1_n1;
 
-EXPLAIN SELECT s1.f1 FROM nested_tbl_1;
-SELECT s1.f1 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n1;
+SELECT s1.f1 FROM nested_tbl_1_n1;
 
-EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1;
-SELECT s1.f1, s1.f2 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1;
+SELECT s1.f1, s1.f2 FROM nested_tbl_1_n1;
 
 -- In this case 's1.f3' and 's1.f3.f4' should be merged
-EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1;
-SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n1;
+SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n1;
 
 -- Testing select array and index shifting
-EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1;
-SELECT s1.f3.f5 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1;
+SELECT s1.f3.f5 FROM nested_tbl_1_n1;
 
 -- Testing select from multiple structs
-EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1;
-SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1;
+SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n1;
 
 
 -- Testing select with filter
 
-EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE;
-SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE;
+EXPLAIN SELECT s1.f2 FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE;
+SELECT s1.f2 FROM nested_tbl_1_n1 WHERE s1.f1 = FALSE;
 
-EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4;
-SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4;
+EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4;
+SELECT s1.f3.f5 FROM nested_tbl_1_n1 WHERE s1.f3.f4 = 4;
 
-EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
-SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
+EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
+SELECT s2.f8 FROM nested_tbl_1_n1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
 
 
 -- Testing lateral view
 
-EXPLAIN SELECT col1, col2 FROM nested_tbl_1
+EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n1
 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1
 LATERAL VIEW explode(s3.f12) tbl2 AS col2;
-SELECT col1, col2 FROM nested_tbl_1
+SELECT col1, col2 FROM nested_tbl_1_n1
 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1
 LATERAL VIEW explode(s3.f12) tbl2 AS col2;
 
 
 -- Testing UDFs
-EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1;
-SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1;
+EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1;
+SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n1;
 
 
 -- Testing aggregations
 
-EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5;
-SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5;
+EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5;
+SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3.f5;
 
-EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3;
-SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3;
+EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3;
+SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3;
 
-EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3;
-SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3;
+EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3;
+SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n1 GROUP BY s1.f3 ORDER BY s1.f3;
 
 
 -- Testing joins
 
 EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2
+FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == FALSE;
 SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2
+FROM nested_tbl_1_n1 t1 JOIN nested_tbl_2_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == FALSE;
 
 EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == TRUE;
 SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 JOIN nested_tbl_1_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == TRUE;
 
 EXPLAIN SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE;
 SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE;
 
 EXPLAIN SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
 ON t1.s1.f1 <> t2.s2.f8.f9;
 SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
 ON t1.s1.f1 <> t2.s2.f8.f9;
 
 EXPLAIN SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9;
 SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n1 t1 LEFT SEMI JOIN nested_tbl_1_n1 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9;
 
 -- Testing insert with aliases
 
-DROP TABLE IF EXISTS nested_tbl_3;
-CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS PARQUET;
+DROP TABLE IF EXISTS nested_tbl_3_n1;
+CREATE TABLE nested_tbl_3_n1 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS PARQUET;
 
-INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3)
+INSERT OVERWRITE TABLE nested_tbl_3_n1 PARTITION(f3)
 SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3
-FROM nested_tbl_1;
+FROM nested_tbl_1_n1;
 
-SELECT * FROM nested_tbl_3;
+SELECT * FROM nested_tbl_3_n1;
 
 -- Testing select struct field from elements in array or map
 
 EXPLAIN
 SELECT count(s1.f6), s3.f12[0].f14
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s3.f12[0].f14;
 
 SELECT count(s1.f6), s3.f12[0].f14
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s3.f12[0].f14;
 
 EXPLAIN
 SELECT count(s1.f6), s4['key1'].f15
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s4['key1'].f15;
 
 SELECT count(s1.f6), s4['key1'].f15
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s4['key1'].f15;
 
 EXPLAIN
 SELECT count(s1.f6), s5.f16[0].f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s5.f16[0].f18.f19;
 
 SELECT count(s1.f6), s5.f16[0].f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s5.f16[0].f18.f19;
 
 EXPLAIN
 SELECT count(s1.f6), s5.f16.f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s5.f16.f18.f19;
 
 SELECT count(s1.f6), s5.f16.f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s5.f16.f18.f19;
 
 EXPLAIN
 SELECT count(s1.f6), s6['key1'].f20[0].f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s6['key1'].f20[0].f21.f22;
 
 SELECT count(s1.f6), s6['key1'].f20[0].f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s6['key1'].f20[0].f21.f22;
 
 EXPLAIN
 SELECT count(s1.f6), s6['key1'].f20.f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s6['key1'].f20.f21.f22;
 
 SELECT count(s1.f6), s6['key1'].f20.f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n1
 GROUP BY s6['key1'].f20.f21.f22;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/newline.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/newline.q b/ql/src/test/queries/clientpositive/newline.q
index 584ff98..f886a3e 100644
--- a/ql/src/test/queries/clientpositive/newline.q
+++ b/ql/src/test/queries/clientpositive/newline.q
@@ -5,56 +5,56 @@ set hive.transform.escape.input=true;
 
 -- SORT_QUERY_RESULTS
 
-create table tmp_tmp(key string, value string) stored as rcfile;
-insert overwrite table tmp_tmp
+create table tmp_tmp_n0(key string, value string) stored as rcfile;
+insert overwrite table tmp_tmp_n0
 SELECT TRANSFORM(key, value) USING
 'python newline.py' AS key, value FROM src limit 6;
 
-select * from tmp_tmp;
+select * from tmp_tmp_n0;
 
-drop table tmp_tmp;
+drop table tmp_tmp_n0;
 
 add file ../../data/scripts/escapednewline.py;
 add file ../../data/scripts/escapedtab.py;
 add file ../../data/scripts/doubleescapedtab.py;
 add file ../../data/scripts/escapedcarriagereturn.py;
 
-create table tmp_tmp(key string, value string) stored as rcfile;
-insert overwrite table tmp_tmp
+create table tmp_tmp_n0(key string, value string) stored as rcfile;
+insert overwrite table tmp_tmp_n0
 SELECT TRANSFORM(key, value) USING
 'python escapednewline.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp;
+select * from tmp_tmp_n0;
 
 SELECT TRANSFORM(key, value) USING
-'cat' AS (key, value) FROM tmp_tmp;
+'cat' AS (key, value) FROM tmp_tmp_n0;
 
-insert overwrite table tmp_tmp
+insert overwrite table tmp_tmp_n0
 SELECT TRANSFORM(key, value) USING
 'python escapedcarriagereturn.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp;
+select * from tmp_tmp_n0;
 
 SELECT TRANSFORM(key, value) USING
-'cat' AS (key, value) FROM tmp_tmp;
+'cat' AS (key, value) FROM tmp_tmp_n0;
 
-insert overwrite table tmp_tmp
+insert overwrite table tmp_tmp_n0
 SELECT TRANSFORM(key, value) USING
 'python escapedtab.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp;
+select * from tmp_tmp_n0;
 
 SELECT TRANSFORM(key, value) USING
-'cat' AS (key, value) FROM tmp_tmp;
+'cat' AS (key, value) FROM tmp_tmp_n0;
 
-insert overwrite table tmp_tmp
+insert overwrite table tmp_tmp_n0
 SELECT TRANSFORM(key, value) USING
 'python doubleescapedtab.py' AS key, value FROM src limit 5;
 
-select * from tmp_tmp;
+select * from tmp_tmp_n0;
 
 SELECT TRANSFORM(key, value) USING
-'cat' AS (key, value) FROM tmp_tmp;
+'cat' AS (key, value) FROM tmp_tmp_n0;
 
 SELECT key FROM (SELECT TRANSFORM ('a\tb', 'c') USING 'cat' AS (key, value) FROM src limit 1)a ORDER BY key ASC;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/notable_alias1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/notable_alias1.q b/ql/src/test/queries/clientpositive/notable_alias1.q
index 846660b..68b8982 100644
--- a/ql/src/test/queries/clientpositive/notable_alias1.q
+++ b/ql/src/test/queries/clientpositive/notable_alias1.q
@@ -1,12 +1,12 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n4(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key;
+INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key;
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', key, count(1) WHERE src.key < 100 group by key;
+INSERT OVERWRITE TABLE dest1_n4 SELECT '1234', key, count(1) WHERE src.key < 100 group by key;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n4.* FROM dest1_n4;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/notable_alias2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/notable_alias2.q b/ql/src/test/queries/clientpositive/notable_alias2.q
index be85807..c133824 100644
--- a/ql/src/test/queries/clientpositive/notable_alias2.q
+++ b/ql/src/test/queries/clientpositive/notable_alias2.q
@@ -1,12 +1,12 @@
 --! qt:dataset:src
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n50(dummy STRING, key INT, value DOUBLE) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key;
+INSERT OVERWRITE TABLE dest1_n50 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key;
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key;
+INSERT OVERWRITE TABLE dest1_n50 SELECT '1234', src.key, count(1) WHERE key < 100 group by src.key;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n50.* FROM dest1_n50;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/notable_alias3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/notable_alias3.q b/ql/src/test/queries/clientpositive/notable_alias3.q
index 9cb7605..414ba24 100644
--- a/ql/src/test/queries/clientpositive/notable_alias3.q
+++ b/ql/src/test/queries/clientpositive/notable_alias3.q
@@ -1,5 +1,5 @@
 --! qt:dataset:src
-CREATE TABLE dest1(c string, key INT, value DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n102(c string, key INT, value DOUBLE) STORED AS TEXTFILE;
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key;
\ No newline at end of file
+INSERT OVERWRITE TABLE dest1_n102 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/nullMap.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/nullMap.q b/ql/src/test/queries/clientpositive/nullMap.q
index f272bb9..28d9e2d 100644
--- a/ql/src/test/queries/clientpositive/nullMap.q
+++ b/ql/src/test/queries/clientpositive/nullMap.q
@@ -1,6 +1,6 @@
 SET hive.vectorized.execution.enabled=false;
 
-create table map_txt (
+create table map_txt_n0 (
   id int,
   content map<int,string>
 )
@@ -9,8 +9,8 @@ null defined as '\\N'
 stored as textfile
 ;
 
-LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt;
+LOAD DATA LOCAL INPATH '../../data/files/mapNull.txt' INTO TABLE map_txt_n0;
 
-select * from map_txt;
+select * from map_txt_n0;
 
-select id, map_keys(content) from map_txt;
+select id, map_keys(content) from map_txt_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/null_column.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/null_column.q b/ql/src/test/queries/clientpositive/null_column.q
index 4275ce5..5c0873b 100644
--- a/ql/src/test/queries/clientpositive/null_column.q
+++ b/ql/src/test/queries/clientpositive/null_column.q
@@ -8,9 +8,9 @@ load data local inpath '../../data/files/test.dat' overwrite into table temp_nul
 
 select null, null from temp_null;
 
-create table tt(a int, b string);
-insert overwrite table tt select null, null from temp_null;
-select * from tt;
+create table tt_n1(a int, b string);
+insert overwrite table tt_n1 select null, null from temp_null;
+select * from tt_n1;
 
 create table tt_b(a int, b string) row format serde "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe";
 insert overwrite table tt_b select null, null from temp_null;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/nullability_transitive_inference.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/nullability_transitive_inference.q b/ql/src/test/queries/clientpositive/nullability_transitive_inference.q
index ff0ce3a..ebde989 100644
--- a/ql/src/test/queries/clientpositive/nullability_transitive_inference.q
+++ b/ql/src/test/queries/clientpositive/nullability_transitive_inference.q
@@ -6,35 +6,35 @@ set hive.strict.checks.cartesian.product=false;
 set hive.stats.fetch.column.stats=true;
 set hive.materializedview.rewriting=true;
 
-create table emps (
+create table emps_n6 (
   empid int,
   deptno int,
   name varchar(256),
   salary float,
   commission int)
 stored as orc TBLPROPERTIES ('transactional'='true');
-insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
+insert into emps_n6 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250);
-analyze table emps compute statistics for columns;
+analyze table emps_n6 compute statistics for columns;
 
-create table depts (
+create table depts_n5 (
   deptno int,
   name varchar(256),
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
-insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
-analyze table depts compute statistics for columns;
+insert into depts_n5 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
+analyze table depts_n5 compute statistics for columns;
 
-alter table emps add constraint pk1 primary key (empid) disable novalidate rely;
-alter table depts add constraint pk2 primary key (deptno) disable novalidate rely;
+alter table emps_n6 add constraint pk1 primary key (empid) disable novalidate rely;
+alter table depts_n5 add constraint pk2 primary key (deptno) disable novalidate rely;
 
-alter table emps add constraint fk1 foreign key (deptno) references depts(deptno) disable novalidate rely;
+alter table emps_n6 add constraint fk1 foreign key (deptno) references depts_n5(deptno) disable novalidate rely;
 
 explain
-select empid from emps
-join depts using (deptno) where depts.deptno >= 20
-group by empid, depts.deptno;
+select empid from emps_n6
+join depts_n5 using (deptno) where depts_n5.deptno >= 20
+group by empid, depts_n5.deptno;
 
-select empid from emps
-join depts using (deptno) where depts.deptno >= 20
-group by empid, depts.deptno;
+select empid from emps_n6
+join depts_n5 using (deptno) where depts_n5.deptno >= 20
+group by empid, depts_n5.deptno;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/nullformatCTAS.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/nullformatCTAS.q b/ql/src/test/queries/clientpositive/nullformatCTAS.q
index d077981..093742f 100644
--- a/ql/src/test/queries/clientpositive/nullformatCTAS.q
+++ b/ql/src/test/queries/clientpositive/nullformatCTAS.q
@@ -1,15 +1,15 @@
 -- base table with null data
-DROP TABLE IF EXISTS base_tab;
-CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab;
-DESCRIBE EXTENDED base_tab;
+DROP TABLE IF EXISTS base_tab_n2;
+CREATE TABLE base_tab_n2(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n2;
+DESCRIBE EXTENDED base_tab_n2;
 
 -- table with non-default null format
 DROP TABLE IF EXISTS null_tab3;
 EXPLAIN CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
-   AS SELECT a, b FROM base_tab;
+   AS SELECT a, b FROM base_tab_n2;
 CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
-   AS SELECT a, b FROM base_tab;
+   AS SELECT a, b FROM base_tab_n2;
 DESCRIBE EXTENDED null_tab3;
 SHOW CREATE TABLE null_tab3;
 
@@ -21,4 +21,4 @@ SELECT * FROM null_tab3;
 
 
 DROP TABLE null_tab3;
-DROP TABLE base_tab;
+DROP TABLE base_tab_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/nullformatdir.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/nullformatdir.q b/ql/src/test/queries/clientpositive/nullformatdir.q
index d298638..60637ab 100644
--- a/ql/src/test/queries/clientpositive/nullformatdir.q
+++ b/ql/src/test/queries/clientpositive/nullformatdir.q
@@ -1,13 +1,13 @@
 -- base table with null data
-DROP TABLE IF EXISTS base_tab;
-CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab;
-DESCRIBE EXTENDED base_tab;
+DROP TABLE IF EXISTS base_tab_n1;
+CREATE TABLE base_tab_n1(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab_n1;
+DESCRIBE EXTENDED base_tab_n1;
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/hive_test/nullformat/tmp;
 dfs -rmr ${system:test.tmp.dir}/hive_test/nullformat/*;
 INSERT OVERWRITE LOCAL DIRECTORY '${system:test.tmp.dir}/hive_test/nullformat'
-   ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab;
+   ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab_n1;
 dfs -cat ${system:test.tmp.dir}/hive_test/nullformat/000000_0;
 
 -- load the exported data back into a table with same null format and verify null values
@@ -18,4 +18,4 @@ SELECT * FROM null_tab2;
 
 
 dfs -rmr ${system:test.tmp.dir}/hive_test/nullformat;
-DROP TABLE base_tab;
+DROP TABLE base_tab_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/nullgroup3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/nullgroup3.q b/ql/src/test/queries/clientpositive/nullgroup3.q
index 282f6c3..af6689e 100644
--- a/ql/src/test/queries/clientpositive/nullgroup3.q
+++ b/ql/src/test/queries/clientpositive/nullgroup3.q
@@ -1,29 +1,29 @@
 set hive.mapred.mode=nonstrict;
-CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
+CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08');
 explain
-select count(1) from tstparttbl;
-select count(1) from tstparttbl;
+select count(1) from tstparttbl_n0;
+select count(1) from tstparttbl_n0;
 
-CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
+CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08');
 explain
-select count(1) from tstparttbl2;
-select count(1) from tstparttbl2;
-DROP TABLE tstparttbl;
-CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
+select count(1) from tstparttbl2_n0;
+select count(1) from tstparttbl2_n0;
+DROP TABLE tstparttbl_n0;
+CREATE TABLE tstparttbl_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl_n0 PARTITION (ds='2008-04-08');
 explain
-select count(1) from tstparttbl;
-select count(1) from tstparttbl;
+select count(1) from tstparttbl_n0;
+select count(1) from tstparttbl_n0;
 
-DROP TABLE tstparttbl2;
-CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
+DROP TABLE tstparttbl2_n0;
+CREATE TABLE tstparttbl2_n0(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2_n0 PARTITION (ds='2008-04-08');
 explain
-select count(1) from tstparttbl2;
-select count(1) from tstparttbl2;
+select count(1) from tstparttbl2_n0;
+select count(1) from tstparttbl2_n0;
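nullgroup3.q above repeatedly loads nullfile.txt, an empty data file, into partitions and counts rows. The behavior under test, in a minimal sketch (empty_part_demo is a hypothetical name):

  CREATE TABLE empty_part_demo (key STRING, value STRING)
    PARTITIONED BY (ds STRING) STORED AS TEXTFILE;
  LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt'
    INTO TABLE empty_part_demo PARTITION (ds='2008-04-08');
  -- a partition backed only by an empty file contributes zero rows
  -- rather than failing the scan:
  SELECT count(1) FROM empty_part_demo;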

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/optimize_filter_literal.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/optimize_filter_literal.q b/ql/src/test/queries/clientpositive/optimize_filter_literal.q
index 28bed09..b0ce4ae 100644
--- a/ql/src/test/queries/clientpositive/optimize_filter_literal.q
+++ b/ql/src/test/queries/clientpositive/optimize_filter_literal.q
@@ -12,38 +12,38 @@ set hive.vectorized.execution.enabled=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n21(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n13 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
+CREATE TABLE srcbucket_mapjoin_part_n22 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n21 partition(ds='2008-04-08');
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n22 partition(ds='2008-04-08');
 
 
 
 set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n13 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n22;
 
-analyze table tab_part partition (ds='2008-04-08') compute statistics for columns;
+analyze table tab_part_n13 partition (ds='2008-04-08') compute statistics for columns;
 
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n14(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE;
+insert overwrite table tab_n14 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n21;
 
-analyze table tab partition (ds='2008-04-08') compute statistics for columns;
+analyze table tab_n14 partition (ds='2008-04-08') compute statistics for columns;
 
 set hive.join.emit.interval=2;
 
 set mapred.reduce.tasks=3;
 
 select * from
-(select * from tab where tab.key = 0)a
+(select * from tab_n14 where tab_n14.key = 0)a
 full outer join
-(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key;
+(select * from tab_part_n13 where tab_part_n13.key = 98)b join tab_part_n13 c on a.key = b.key and b.key = c.key;
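The final query is the interesting part of optimize_filter_literal.q: each join branch is filtered down to a single literal key, which the planner can presumably propagate across the join conditions. The shape, reduced to hypothetical two-table names:

  SELECT * FROM
    (SELECT * FROM small_tab WHERE small_tab.key = 0) a
  FULL OUTER JOIN
    (SELECT * FROM big_tab WHERE big_tab.key = 98) b
  JOIN big_tab c ON a.key = b.key AND b.key = c.key;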
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/optimize_join_ptp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/optimize_join_ptp.q b/ql/src/test/queries/clientpositive/optimize_join_ptp.q
index 5807ec3..81d4f5e 100644
--- a/ql/src/test/queries/clientpositive/optimize_join_ptp.q
+++ b/ql/src/test/queries/clientpositive/optimize_join_ptp.q
@@ -1,16 +1,16 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 
-create table t1 (v string, k int);
-insert into t1 values ('people', 10), ('strangers', 20), ('parents', 30);
+create table t1_n97 (v string, k int);
+insert into t1_n97 values ('people', 10), ('strangers', 20), ('parents', 30);
 
-create table t2 (v string, k double);
-insert into t2 values ('people', 10), ('strangers', 20), ('parents', 30);
+create table t2_n60 (v string, k double);
+insert into t2_n60 values ('people', 10), ('strangers', 20), ('parents', 30);
 
 -- should not produce exceptions
 explain
-select * from t1 where t1.k in (select t2.k from t2 where t2.v='people') and t1.k<15;
+select * from t1_n97 where t1_n97.k in (select t2_n60.k from t2_n60 where t2_n60.v='people') and t1_n97.k<15;
 
-select * from t1 where t1.k in (select t2.k from t2 where t2.v='people') and t1.k<15;
+select * from t1_n97 where t1_n97.k in (select t2_n60.k from t2_n60 where t2_n60.v='people') and t1_n97.k<15;
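Note the type mismatch this test is built around: t1_n97.k is int while t2_n60.k is double, so the IN-subquery comparison forces a promotion of the join key, and the residual predicate k < 15 has to survive the rewrite. A self-contained reproduction under those assumptions (ints and dbls are hypothetical names):

  CREATE TABLE ints (v STRING, k INT);
  CREATE TABLE dbls (v STRING, k DOUBLE);
  -- should compile without exceptions; k is compared as double:
  EXPLAIN
  SELECT * FROM ints
  WHERE ints.k IN (SELECT dbls.k FROM dbls WHERE dbls.v = 'people')
    AND ints.k < 15;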
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_analyze.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_analyze.q b/ql/src/test/queries/clientpositive/orc_analyze.q
index 2683fdc..aea09d4 100644
--- a/ql/src/test/queries/clientpositive/orc_analyze.q
+++ b/ql/src/test/queries/clientpositive/orc_analyze.q
@@ -3,7 +3,7 @@ set hive.mapred.mode=nonstrict;
 set hive.exec.submitviachild=false;
 set hive.exec.submit.local.task.via.child=false;
 
-CREATE TABLE orc_create_people_staging (
+CREATE TABLE orc_create_people_staging_n0 (
   id int,
   first_name string,
   last_name string,
@@ -12,14 +12,14 @@ CREATE TABLE orc_create_people_staging (
   start_date timestamp,
   state string);
 
-LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt' OVERWRITE INTO TABLE orc_create_people_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt' OVERWRITE INTO TABLE orc_create_people_staging_n0;
 
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 set hive.stats.autogather=false;
 -- non-partitioned table
 -- partial scan gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
   id int,
   first_name string,
   last_name string,
@@ -29,19 +29,19 @@ CREATE TABLE orc_create_people (
   state string)
 STORED AS orc;
 
-INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
 
 set hive.stats.autogather = true;
-analyze table orc_create_people compute statistics;
-desc formatted orc_create_people;
+analyze table orc_create_people_n0 compute statistics;
+desc formatted orc_create_people_n0;
 
-analyze table orc_create_people compute statistics noscan;
-desc formatted orc_create_people;
+analyze table orc_create_people_n0 compute statistics noscan;
+desc formatted orc_create_people_n0;
 
-drop table orc_create_people;
+drop table orc_create_people_n0;
 
 -- auto stats gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
   id int,
   first_name string,
   last_name string,
@@ -51,16 +51,16 @@ CREATE TABLE orc_create_people (
   state string)
 STORED AS orc;
 
-INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
 
-desc formatted orc_create_people;
+desc formatted orc_create_people_n0;
 
-drop table orc_create_people;
+drop table orc_create_people_n0;
 
 set hive.stats.autogather=false;
 -- partitioned table
 -- partial scan gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
   id int,
   first_name string,
   last_name string,
@@ -70,22 +70,22 @@ CREATE TABLE orc_create_people (
 PARTITIONED BY (state string)
 STORED AS orc;
 
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
-  SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+  SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
 
 set hive.stats.autogather = true;
-analyze table orc_create_people partition(state) compute statistics;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
 
-analyze table orc_create_people partition(state) compute statistics noscan;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics noscan;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
 
-drop table orc_create_people;
+drop table orc_create_people_n0;
 
 -- auto stats gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
   id int,
   first_name string,
   last_name string,
@@ -95,18 +95,18 @@ CREATE TABLE orc_create_people (
 PARTITIONED BY (state string)
 STORED AS orc;
 
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
-  SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+  SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
 
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
 
-drop table orc_create_people;
+drop table orc_create_people_n0;
 
 set hive.stats.autogather=false;
 -- partitioned and bucketed table
 -- partial scan gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
   id int,
   first_name string,
   last_name string,
@@ -119,22 +119,22 @@ sorted by (last_name)
 into 4 buckets
 STORED AS orc;
 
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
-  SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+  SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
 
 set hive.stats.autogather = true;
-analyze table orc_create_people partition(state) compute statistics;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
 
-analyze table orc_create_people partition(state) compute statistics noscan;
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+analyze table orc_create_people_n0 partition(state) compute statistics noscan;
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
 
-drop table orc_create_people;
+drop table orc_create_people_n0;
 
 -- auto stats gather
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
   id int,
   first_name string,
   last_name string,
@@ -147,19 +147,19 @@ sorted by (last_name)
 into 4 buckets
 STORED AS orc;
 
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
-  SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+  SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
 
-desc formatted orc_create_people partition(state="Ca");
-desc formatted orc_create_people partition(state="Or");
+desc formatted orc_create_people_n0 partition(state="Ca");
+desc formatted orc_create_people_n0 partition(state="Or");
 
-drop table orc_create_people;
+drop table orc_create_people_n0;
 
 set hive.stats.autogather=false;
 -- create table with partitions containing text and ORC files.
 -- ORC files implement StatsProvidingRecordReader but text files do not.
 -- So the partition containing the text file should not have statistics.
-CREATE TABLE orc_create_people (
+CREATE TABLE orc_create_people_n0 (
   id int,
   first_name string,
   last_name string,
@@ -169,14 +169,14 @@ CREATE TABLE orc_create_people (
 PARTITIONED BY (state string)
 STORED AS orc;
 
-INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
-  SELECT * FROM orc_create_people_staging ORDER BY id;
+INSERT OVERWRITE TABLE orc_create_people_n0 PARTITION (state)
+  SELECT * FROM orc_create_people_staging_n0 ORDER BY id;
 
 set hive.stats.autogather = true;
-analyze table orc_create_people partition(state) compute statistics;
-desc formatted orc_create_people partition(state="Ca");
+analyze table orc_create_people_n0 partition(state) compute statistics;
+desc formatted orc_create_people_n0 partition(state="Ca");
 
-analyze table orc_create_people partition(state) compute statistics noscan;
-desc formatted orc_create_people partition(state="Ca");
+analyze table orc_create_people_n0 partition(state) compute statistics noscan;
+desc formatted orc_create_people_n0 partition(state="Ca");
 
-drop table orc_create_people;
+drop table orc_create_people_n0;
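orc_analyze.q cycles the same table shape through three statistics modes: explicit ANALYZE, metadata-only NOSCAN (row counts come from ORC file footers, so no data scan is needed), and autogather at insert time. A condensed sketch of the NOSCAN path (stats_demo is a hypothetical name):

  SET hive.stats.autogather=false;
  CREATE TABLE stats_demo (id INT) STORED AS ORC;
  INSERT OVERWRITE TABLE stats_demo SELECT 1 FROM src TABLESAMPLE (10 ROWS);
  -- NOSCAN reads counts out of ORC metadata instead of scanning rows:
  ANALYZE TABLE stats_demo COMPUTE STATISTICS NOSCAN;
  DESC FORMATTED stats_demo;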

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_create.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_create.q b/ql/src/test/queries/clientpositive/orc_create.q
index 930a7cb..6d41009 100644
--- a/ql/src/test/queries/clientpositive/orc_create.q
+++ b/ql/src/test/queries/clientpositive/orc_create.q
@@ -6,13 +6,13 @@ set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
 DROP TABLE orc_create;
-DROP TABLE orc_create_complex;
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_complex_n1;
+DROP TABLE orc_create_staging_n1;
 DROP TABLE orc_create_people_staging;
 DROP TABLE orc_create_people;
 DROP TABLE if exists orc_create_cprl;
 
-CREATE TABLE orc_create_staging (
+CREATE TABLE orc_create_staging_n1 (
   str STRING,
   mp  MAP<STRING,STRING>,
   lst ARRAY<STRING>,
@@ -22,7 +22,7 @@ CREATE TABLE orc_create_staging (
     COLLECTION ITEMS TERMINATED BY ','
     MAP KEYS TERMINATED BY ':';
 
-DESCRIBE FORMATTED orc_create_staging;
+DESCRIBE FORMATTED orc_create_staging_n1;
 
 CREATE TABLE orc_create (key INT, value STRING)
    PARTITIONED BY (ds string)
@@ -52,26 +52,26 @@ set hive.default.fileformat=TextFile;
 
 DESCRIBE FORMATTED orc_create;
 
-CREATE TABLE orc_create_complex (
+CREATE TABLE orc_create_complex_n1 (
   str STRING,
   mp  MAP<STRING,STRING>,
   lst ARRAY<STRING>,
   strct STRUCT<A:STRING,B:STRING>
 ) STORED AS ORC;
 
-DESCRIBE FORMATTED orc_create_complex;
+DESCRIBE FORMATTED orc_create_complex_n1;
 
-LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n1;
 
-SELECT * from orc_create_staging;
+SELECT * from orc_create_staging_n1;
 
-INSERT OVERWRITE TABLE orc_create_complex SELECT * FROM orc_create_staging;
+INSERT OVERWRITE TABLE orc_create_complex_n1 SELECT * FROM orc_create_staging_n1;
 
-SELECT * from orc_create_complex;
-SELECT str from orc_create_complex;
-SELECT mp from orc_create_complex;
-SELECT lst from orc_create_complex;
-SELECT strct from orc_create_complex;
+SELECT * from orc_create_complex_n1;
+SELECT str from orc_create_complex_n1;
+SELECT mp from orc_create_complex_n1;
+SELECT lst from orc_create_complex_n1;
+SELECT strct from orc_create_complex_n1;
 
 CREATE TABLE orc_create_people_staging (
   id int,
@@ -135,8 +135,8 @@ SELECT 1 from src limit 1;
 SELECT * from orc_create_cprl;
 
 DROP TABLE orc_create;
-DROP TABLE orc_create_complex;
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_complex_n1;
+DROP TABLE orc_create_staging_n1;
 DROP TABLE orc_create_people_staging;
 DROP TABLE orc_create_people;
 DROP TABLE orc_create_cprl;
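The substance of orc_create.q is round-tripping complex types from a delimited text staging table into ORC. The renamed tables reduce to this pattern (complex_demo is a hypothetical name; the staging table is the one from the hunk above):

  CREATE TABLE complex_demo (
    str STRING, mp MAP<STRING,STRING>,
    lst ARRAY<STRING>, strct STRUCT<A:STRING,B:STRING>
  ) STORED AS ORC;
  INSERT OVERWRITE TABLE complex_demo SELECT * FROM orc_create_staging_n1;
  -- each nested type must come back intact:
  SELECT str, mp, lst, strct FROM complex_demo;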

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q b/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
index 1848500..1429a6d 100644
--- a/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
+++ b/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
@@ -7,19 +7,19 @@ set hive.exec.orc.dictionary.key.size.threshold=-1;
 -- Tests that the data can be read back correctly when a string column is stored
 -- without dictionary encoding
 
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n5 (key STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' 
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' 
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat';
 
 -- should be single split
-INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS);
+INSERT OVERWRITE TABLE test_orc_n5 SELECT key FROM src TABLESAMPLE (10 ROWS);
 
 -- Test reading the column back
 
-SELECT * FROM test_orc; 
+SELECT * FROM test_orc_n5; 
 
-ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.stripe.size' = '1');
+ALTER TABLE test_orc_n5 SET SERDEPROPERTIES ('orc.stripe.size' = '1');
 
 CREATE TABLE src_thousand(key STRING) STORED AS TEXTFILE;
 LOAD DATA LOCAL INPATH '../../data/files/kv1kv2.cogroup.txt' 
@@ -35,7 +35,7 @@ set hive.exec.orc.dictionary.key.size.threshold=0.5;
 -- dictionary encoded. The final stripe will have 630 out of 1000 and be 
 -- direct encoded.
 
-INSERT OVERWRITE TABLE test_orc
+INSERT OVERWRITE TABLE test_orc_n5
 SELECT key FROM (
 SELECT CONCAT("a", key) AS key FROM src_thousand
 UNION ALL
@@ -60,4 +60,4 @@ UNION ALL
 SELECT CONCAT("k", key) AS key FROM src_thousand
 ) a ORDER BY key LIMIT 11000;
 
-SELECT SUM(HASH(key)) FROM test_orc;
+SELECT SUM(HASH(key)) FROM test_orc_n5;
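The threshold setting driving this test is, as I read it, the maximum ratio of distinct keys to non-null values that a string column may have and still get dictionary encoding, evaluated per stripe:

  -- -1 disables dictionary encoding outright (every stripe direct-encoded):
  SET hive.exec.orc.dictionary.key.size.threshold=-1;
  -- 0.5 lets low-cardinality stripes stay dictionary encoded while
  -- high-cardinality stripes fall back to direct encoding:
  SET hive.exec.orc.dictionary.key.size.threshold=0.5;

Both settings appear in the test; the SUM(HASH(key)) check at the end verifies that the mixed encodings read back identically.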

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_diff_part_cols.q b/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
index 3a2cbd4..ea9623e 100644
--- a/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
+++ b/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
@@ -6,7 +6,7 @@ set hive.mapred.mode=nonstrict;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n0 (key STRING)
 PARTITIONED BY (part STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' 
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' 
@@ -18,10 +18,10 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- to another partition
 -- This can produce unexpected results with CombineHiveInputFormat
 
-INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows);
+INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '1') SELECT key FROM src tablesample (5 rows);
 
-ALTER TABLE test_orc ADD COLUMNS (cnt INT);
+ALTER TABLE test_orc_n0 ADD COLUMNS (cnt INT);
 
-INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5;
+INSERT OVERWRITE TABLE test_orc_n0 PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5;
 
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n0;
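Condensing the hunks above, the scenario is: write a partition, widen the schema, write another partition, then scan both through CombineHiveInputFormat (evolve_demo is a hypothetical name):

  CREATE TABLE evolve_demo (key STRING)
    PARTITIONED BY (part STRING) STORED AS ORC;
  INSERT OVERWRITE TABLE evolve_demo PARTITION (part = '1')
    SELECT key FROM src TABLESAMPLE (5 ROWS);
  ALTER TABLE evolve_demo ADD COLUMNS (cnt INT);
  INSERT OVERWRITE TABLE evolve_demo PARTITION (part = '2')
    SELECT key, count(*) FROM src GROUP BY key LIMIT 5;
  -- rows from part = '1' should surface NULL for cnt:
  SELECT * FROM evolve_demo;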

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q b/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
index 41db00e..d7fdbc8 100644
--- a/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
+++ b/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
@@ -7,11 +7,11 @@ set hive.vectorized.execution.enabled=false;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n4 (key STRING)
 STORED AS ORC;
 
-INSERT OVERWRITE TABLE test_orc SELECT key FROM src LIMIT 5;
+INSERT OVERWRITE TABLE test_orc_n4 SELECT key FROM src LIMIT 5;
 
-ALTER TABLE test_orc ADD COLUMNS (value STRING);
+ALTER TABLE test_orc_n4 ADD COLUMNS (value STRING);
 
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n4;
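orc_diff_part_cols2.q is the unpartitioned variant of the same schema-evolution check: ORC files written before the ALTER lack the new column, and the reader is expected to fill it in rather than fail:

  ALTER TABLE test_orc_n4 ADD COLUMNS (value STRING);
  -- pre-existing rows read back as (key, NULL):
  SELECT * FROM test_orc_n4;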

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_empty_files.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_empty_files.q b/ql/src/test/queries/clientpositive/orc_empty_files.q
index 1ae6e68..f46d7b5 100644
--- a/ql/src/test/queries/clientpositive/orc_empty_files.q
+++ b/ql/src/test/queries/clientpositive/orc_empty_files.q
@@ -1,7 +1,7 @@
 --! qt:dataset:src
 
 set hive.vectorized.execution.enabled=false;
-CREATE TABLE test_orc (key STRING, cnt INT)
+CREATE TABLE test_orc_n2 (key STRING, cnt INT)
 CLUSTERED BY (key) INTO 3 BUCKETS
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' 
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' 
@@ -16,6 +16,6 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 -- containing data and a file containing data followed by an empty file.
 -- This can produce unexpected results with CombineHiveInputFormat
 
-INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one;
+INSERT OVERWRITE TABLE test_orc_n2 SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one;
 
-SELECT count(*) FROM test_orc; 
+SELECT count(*) FROM test_orc_n2; 
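The bucketing here is what manufactures the empty files: the insert produces a single distinct group, so of the 3 bucket files at least two come out empty, and CombineHiveInputFormat has to combine empty and populated ORC files into one split without error. Sketch of the trigger:

  -- one distinct group into 3 buckets leaves the other bucket files empty:
  INSERT OVERWRITE TABLE test_orc_n2
    SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one;
  SELECT count(*) FROM test_orc_n2;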

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_empty_strings.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_empty_strings.q b/ql/src/test/queries/clientpositive/orc_empty_strings.q
index 53902f0..10650bf 100644
--- a/ql/src/test/queries/clientpositive/orc_empty_strings.q
+++ b/ql/src/test/queries/clientpositive/orc_empty_strings.q
@@ -4,19 +4,19 @@ set hive.vectorized.execution.enabled=false;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE test_orc (key STRING)
+CREATE TABLE test_orc_n3 (key STRING)
 ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' 
 STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' 
 OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat';
 
-INSERT OVERWRITE TABLE test_orc SELECT '' FROM src tablesample (10 rows);
+INSERT OVERWRITE TABLE test_orc_n3 SELECT '' FROM src tablesample (10 rows);
 
 -- Test reading a column which is just empty strings
 
-SELECT * FROM test_orc; 
+SELECT * FROM test_orc_n3; 
 
-INSERT OVERWRITE TABLE test_orc SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows);
+INSERT OVERWRITE TABLE test_orc_n3 SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows);
 
 -- Test reading a column which has some empty strings
 
-SELECT * FROM test_orc;
+SELECT * FROM test_orc_n3;
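Both inserts in this test target the degenerate edge of ORC's string handling: a column that is entirely empty strings, then one that mixes empty and non-empty values (presumably stressing the dictionary and string-length streams at zero length):

  INSERT OVERWRITE TABLE test_orc_n3
    SELECT IF (key % 3 = 0, key, '') FROM src TABLESAMPLE (10 ROWS);
  -- empty strings must round-trip as '' rather than NULL:
  SELECT * FROM test_orc_n3;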

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q b/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
index 9c33048..ba6dc05 100644
--- a/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
+++ b/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
@@ -11,9 +11,9 @@ ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.row.index.stride' = '1000');
 -- this produces the effect that the number of non-null rows between the last and second
 -- to last index stride are the same (there are only two index strides)
 
-CREATE TABLE src_null(a STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null;
+CREATE TABLE src_null_n0(a STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null_n0;
 
-INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null;
+INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null_n0;
 
 SELECT * FROM test_orc LIMIT 5;
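The setup pins orc.row.index.stride to 1000 so nulls.txt spans exactly two index strides, apparently ending in a run of NULLs; per the comment above, both strides then hold the same number of non-null rows, and the LIMIT 5 read checks that row-index positioning still lands correctly:

  ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.row.index.stride' = '1000');
  INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null_n0;
  SELECT * FROM test_orc LIMIT 5;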

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_file_dump.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_file_dump.q b/ql/src/test/queries/clientpositive/orc_file_dump.q
index 754e121..0aec810 100644
--- a/ql/src/test/queries/clientpositive/orc_file_dump.q
+++ b/ql/src/test/queries/clientpositive/orc_file_dump.q
@@ -1,7 +1,7 @@
 set hive.vectorized.execution.enabled=false;
 set hive.mapred.mode=nonstrict;
 
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n4(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -15,9 +15,9 @@ CREATE TABLE staging(t tinyint,
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n4;
 
-CREATE TABLE orc_ppd(t tinyint,
+CREATE TABLE orc_ppd_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -30,17 +30,17 @@ CREATE TABLE orc_ppd(t tinyint,
            bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd select * from staging;
+insert overwrite table orc_ppd_n0 select * from staging_n4;
 
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
 
-select * from orc_ppd limit 1;
+select * from orc_ppd_n0 limit 1;
 
-alter table orc_ppd set tblproperties("orc.bloom.filter.fpp"="0.01");
+alter table orc_ppd_n0 set tblproperties("orc.bloom.filter.fpp"="0.01");
 
-insert overwrite table orc_ppd select * from staging;
+insert overwrite table orc_ppd_n0 select * from staging_n4;
 
-select * from orc_ppd limit 1;
+select * from orc_ppd_n0 limit 1;
 
 CREATE TABLE orc_ppd_part(t tinyint,
            si smallint,
@@ -55,6 +55,6 @@ CREATE TABLE orc_ppd_part(t tinyint,
            bin binary)
 PARTITIONED BY (ds string, hr int) STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd_part partition(ds = "2015", hr = 10) select * from staging;
+insert overwrite table orc_ppd_part partition(ds = "2015", hr = 10) select * from staging_n4;
 
 select * from orc_ppd_part limit 1;
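The hook is what makes orc_file_dump.q assertable: PostExecOrcFileDump prints ORC file metadata after every query, so the golden output captures the bloom filters created by orc.bloom.filter.columns="*" and, after the ALTER, their change when the false-positive rate is tightened:

  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
  -- any read now appends an ORC file dump to the output:
  SELECT * FROM orc_ppd_n0 LIMIT 1;
  -- tighten the bloom filter fpp, rewrite, and dump again:
  ALTER TABLE orc_ppd_n0 SET TBLPROPERTIES ("orc.bloom.filter.fpp"="0.01");
  INSERT OVERWRITE TABLE orc_ppd_n0 SELECT * FROM staging_n4;
  SELECT * FROM orc_ppd_n0 LIMIT 1;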

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
index 93cd1bd..3f34500 100644
--- a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -4,7 +4,7 @@ set hive.vectorized.execution.enabled=false;
 set hive.mapred.mode=nonstrict;
 set hive.metastore.disallow.incompatible.col.type.changes=false;
 
-create table if not exists alltypes (
+create table if not exists alltypes_n0 (
  bo boolean,
  ti tinyint,
  si smallint,
@@ -25,7 +25,7 @@ create table if not exists alltypes (
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
 
-create table if not exists alltypes_orc (
+create table if not exists alltypes_orc_n0 (
  bo boolean,
  ti tinyint,
  si smallint,
@@ -44,26 +44,26 @@ create table if not exists alltypes_orc (
  st struct<c1:int, c2:string>
 ) stored as orc;
 
-load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes;
+load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n0;
 
-insert overwrite table alltypes_orc select * from alltypes;
+insert overwrite table alltypes_orc_n0 select * from alltypes_n0;
 
-select * from alltypes_orc;
+select * from alltypes_orc_n0;
 
 SET hive.exec.schema.evolution=true;
 
-alter table alltypes_orc change si si int;
-select * from alltypes_orc;
+alter table alltypes_orc_n0 change si si int;
+select * from alltypes_orc_n0;
 
-alter table alltypes_orc change si si bigint;
-alter table alltypes_orc change i i bigint;
-select * from alltypes_orc;
+alter table alltypes_orc_n0 change si si bigint;
+alter table alltypes_orc_n0 change i i bigint;
+select * from alltypes_orc_n0;
 
 set hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
 
-explain select ti, si, i, bi from alltypes_orc;
-select ti, si, i, bi from alltypes_orc;
+explain select ti, si, i, bi from alltypes_orc_n0;
+select ti, si, i, bi from alltypes_orc_n0;
 
 SET hive.exec.schema.evolution=false;
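The promotions at the end only work because hive.exec.schema.evolution is on: widening an integral column via ALTER TABLE ... CHANGE leaves the old ORC data in place and promotes it at read time, and the test re-reads the result both in row mode and vectorized. The essential sequence:

  SET hive.exec.schema.evolution=true;
  -- widen in place; existing ORC files keep their narrower encoding:
  ALTER TABLE alltypes_orc_n0 CHANGE si si BIGINT;
  ALTER TABLE alltypes_orc_n0 CHANGE i i BIGINT;
  SET hive.vectorized.execution.enabled=true;
  SELECT ti, si, i, bi FROM alltypes_orc_n0;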