Posted to commits@hive.apache.org by jc...@apache.org on 2018/05/25 00:58:22 UTC

[50/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)
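
For context, the change applied throughout this patch is mechanical: each test table is given a unique _nN suffix so that .q files executed in the same batch (sharing one metastore) no longer create or drop tables with colliding names. A minimal sketch of the before/after shape, using one of the actual renames from the first hunk below (acid_vectorized -> acid_vectorized_n1); the DROP is illustrative, not every .q file drops its tables:

    -- before: any other .q file in the batch using this name would collide
    CREATE TABLE acid_vectorized (a INT, b STRING)
      CLUSTERED BY (a) INTO 2 BUCKETS STORED AS ORC
      TBLPROPERTIES ('transactional'='true');
    DROP TABLE acid_vectorized;

    -- after: the _n1 suffix makes the name unique to this .q file
    CREATE TABLE acid_vectorized_n1 (a INT, b STRING)
      CLUSTERED BY (a) INTO 2 BUCKETS STORED AS ORC
      TBLPROPERTIES ('transactional'='true');
    DROP TABLE acid_vectorized_n1;

Every other hunk in this patch follows the same pattern: only table identifiers change; DDL, data loads, and query logic are untouched.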

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 17d6a95..4590b7a 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -34,7 +34,9 @@ disabled.query.files=ql_rewrite_gbtoidx.q,\
   stats_filemetadata.q,\
   cbo_rp_insert.q,\
   cbo_rp_lineage2.q,\
-  union_stats.q
+  union_stats.q,\
+  sample2.q,\
+  sample4.q
 
 
 # NOTE: Add tests to minitez only if it is very

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/acid_nullscan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_nullscan.q b/ql/src/test/queries/clientpositive/acid_nullscan.q
index 3c71242..0b36708 100644
--- a/ql/src/test/queries/clientpositive/acid_nullscan.q
+++ b/ql/src/test/queries/clientpositive/acid_nullscan.q
@@ -6,13 +6,13 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 
-CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
-insert into table acid_vectorized values (1, 'bar');
+CREATE TABLE acid_vectorized_n1(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
+insert into table acid_vectorized_n1 select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;
+insert into table acid_vectorized_n1 values (1, 'bar');
 
 
 explain extended
-select sum(a) from acid_vectorized where false;
+select sum(a) from acid_vectorized_n1 where false;
 
-select sum(a) from acid_vectorized where false;
+select sum(a) from acid_vectorized_n1 where false;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/acid_vectorization_original.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_original.q b/ql/src/test/queries/clientpositive/acid_vectorization_original.q
index 0b91f69..5082aed 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_original.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_original.q
@@ -26,7 +26,7 @@ CREATE TEMPORARY FUNCTION runWorker AS 'org.apache.hadoop.hive.ql.udf.UDFRunWork
 create table mydual(a int);
 insert into mydual values(1);
 
-CREATE TABLE over10k(t tinyint,
+CREATE TABLE over10k_n2(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -41,7 +41,7 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
 --oddly this has 9999 rows not > 10K
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k_n2;
 
 CREATE TABLE over10k_orc_bucketed(t tinyint,
            si smallint,
@@ -56,14 +56,14 @@ CREATE TABLE over10k_orc_bucketed(t tinyint,
            bin binary) CLUSTERED BY(si) INTO 4 BUCKETS STORED AS ORC;
 
 -- this produces about 250 distinct values across all 4 equivalence classes
-select distinct si, si%4 from over10k order by si;
+select distinct si, si%4 from over10k_n2 order by si;
 
--- explain insert into over10k_orc_bucketed select * from over10k;
-insert into over10k_orc_bucketed select * from over10k;
+-- explain insert into over10k_orc_bucketed select * from over10k_n2;
+insert into over10k_orc_bucketed select * from over10k_n2;
 
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;
 -- create copy_N files
-insert into over10k_orc_bucketed select * from over10k;
+insert into over10k_orc_bucketed select * from over10k_n2;
 
 -- this output of this is masked in .out - it is visible in .orig
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q b/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
index 00a3ab2..50fb3e9 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
@@ -23,10 +23,10 @@ set hive.explain.user=false;
 
 
 CREATE TEMPORARY FUNCTION runWorker AS 'org.apache.hadoop.hive.ql.udf.UDFRunWorker';
-create table mydual(a int);
-insert into mydual values(1);
+create table mydual_n0(a int);
+insert into mydual_n0 values(1);
 
-CREATE TABLE over10k(t tinyint,
+CREATE TABLE over10k_n9(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -41,9 +41,9 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
 --oddly this has 9999 rows not > 10K
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k_n9;
 
-CREATE TABLE over10k_orc_bucketed(t tinyint,
+CREATE TABLE over10k_orc_bucketed_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -56,42 +56,42 @@ CREATE TABLE over10k_orc_bucketed(t tinyint,
            bin binary) CLUSTERED BY(si) INTO 4 BUCKETS STORED AS ORC;
 
 -- this produces about 250 distinct values across all 4 equivalence classes
-select distinct si, si%4 from over10k order by si;
+select distinct si, si%4 from over10k_n9 order by si;
 
--- explain insert into over10k_orc_bucketed select * from over10k;
-insert into over10k_orc_bucketed select * from over10k;
+-- explain insert into over10k_orc_bucketed_n0 select * from over10k_n9;
+insert into over10k_orc_bucketed_n0 select * from over10k_n9;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed_n0;
 -- create copy_N files
-insert into over10k_orc_bucketed select * from over10k;
+insert into over10k_orc_bucketed_n0 select * from over10k_n9;
 
 -- this output of this is masked in .out - it is visible in .orig
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed_n0;
 
 --this actually shows the data files in the .out on Tez but not LLAP
-select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed;
+select distinct 7 as seven, INPUT__FILE__NAME from over10k_orc_bucketed_n0;
 
 -- convert table to acid
-alter table over10k_orc_bucketed set TBLPROPERTIES ('transactional'='true');
+alter table over10k_orc_bucketed_n0 set TBLPROPERTIES ('transactional'='true');
 
 -- this should vectorize (and push predicate to storage: filterExpr in TableScan )
 --             Execution mode: vectorized (both Map and Reducer)
-explain select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by t, si, i;
-select t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by  t, si, i;
+explain select t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by t, si, i;
+select t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by  t, si, i;
 
 -- this should vectorize (and push predicate to storage: filterExpr in TableScan )
 --            Execution mode: vectorized
-explain select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
-select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
+explain select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;
+select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;
 
 -- this should vectorize (and push predicate to storage: filterExpr in TableScan )
 -- same as above
-explain update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100;
-update over10k_orc_bucketed set i = 0 where b = 4294967363 and t < 100;
+explain update over10k_orc_bucketed_n0 set i = 0 where b = 4294967363 and t < 100;
+update over10k_orc_bucketed_n0 set i = 0 where b = 4294967363 and t < 100;
 
 -- this should produce the same result (data) as previous time this exact query ran
 -- ROW__ID will be different (same bucketProperty)
-select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
+select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;
 
 -- The idea below was to do check sum queries to ensure that ROW__IDs are unique
 -- to run Compaction and to check that ROW__IDs are the same before and after compaction (for rows
@@ -100,23 +100,23 @@ select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t <
 -- this doesn't vectorize
 -- use explain VECTORIZATION DETAIL to see
 -- notVectorizedReason: Key expression for GROUPBY operator: Vectorizing complex type STRUCT not supported
-explain select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1;
+explain select ROW__ID, count(*) from over10k_orc_bucketed_n0 group by ROW__ID having count(*) > 1;
 
 -- this test that there are no duplicate ROW__IDs so should produce no output
-select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1;
+select ROW__ID, count(*) from over10k_orc_bucketed_n0 group by ROW__ID having count(*) > 1;
 
 -- schedule compactor
-alter table over10k_orc_bucketed compact 'major' WITH OVERWRITE TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='500', 'compactor.mapreduce.reduce.memory.mb'='500','compactor.mapreduce.map.memory.mb'='500', 'compactor.hive.tez.container.size'='500');
+alter table over10k_orc_bucketed_n0 compact 'major' WITH OVERWRITE TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='500', 'compactor.mapreduce.reduce.memory.mb'='500','compactor.mapreduce.map.memory.mb'='500', 'compactor.hive.tez.container.size'='500');
 
 
 --  run compactor - this currently fails with
 -- Invalid resource request, requested memory < 0, or requested memory > max configured, requestedMemory=1536, maxMemory=512
--- select runWorker() from mydual;
+-- select runWorker() from mydual_n0;
 
 -- show compactions;
 
 -- this should produce the same (data + ROW__ID) as before compaction
-select ROW__ID, t, si, i from over10k_orc_bucketed where b = 4294967363 and t < 100 order by ROW__ID;
+select ROW__ID, t, si, i from over10k_orc_bucketed_n0 where b = 4294967363 and t < 100 order by ROW__ID;
 
 -- this test that there are no duplicate ROW__IDs so should produce no output
-select ROW__ID, count(*) from over10k_orc_bucketed group by ROW__ID having count(*) > 1;
+select ROW__ID, count(*) from over10k_orc_bucketed_n0 group by ROW__ID having count(*) > 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/acid_vectorization_project.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_project.q b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
index 3725cb1..e564442 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_project.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_project.q
@@ -4,9 +4,9 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
 set hive.exec.dynamic.partition.mode=nonstrict;
 
-CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10;
+CREATE TABLE acid_vectorized_n2(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
+insert into table acid_vectorized_n2 select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10;
 set hive.vectorized.execution.enabled=true;
-select a,b from acid_vectorized order by a;
-select a,c from acid_vectorized order by a;
-select b,c from acid_vectorized order by b;
+select a,b from acid_vectorized_n2 order by a;
+select a,c from acid_vectorized_n2 order by a;
+select b,c from acid_vectorized_n2 order by b;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/add_part_multiple.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/add_part_multiple.q b/ql/src/test/queries/clientpositive/add_part_multiple.q
index 93ed787..3c8bb52 100644
--- a/ql/src/test/queries/clientpositive/add_part_multiple.q
+++ b/ql/src/test/queries/clientpositive/add_part_multiple.q
@@ -1,25 +1,25 @@
 set hive.mapred.mode=nonstrict;
 -- HIVE-5122 locations for 2nd, 3rd... partition are ignored
 
-CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE add_part_test_n1 (key STRING, value STRING) PARTITIONED BY (ds STRING);
 
 explain
-ALTER TABLE add_part_test ADD IF NOT EXISTS
+ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
 PARTITION (ds='2010-01-01') location 'A'
 PARTITION (ds='2010-02-01') location 'B'
 PARTITION (ds='2010-03-01')
 PARTITION (ds='2010-04-01') location 'C';
 
-ALTER TABLE add_part_test ADD IF NOT EXISTS
+ALTER TABLE add_part_test_n1 ADD IF NOT EXISTS
 PARTITION (ds='2010-01-01') location 'A'
 PARTITION (ds='2010-02-01') location 'B'
 PARTITION (ds='2010-03-01')
 PARTITION (ds='2010-04-01') location 'C';
 
 from src TABLESAMPLE (1 ROWS)
-insert into table add_part_test PARTITION (ds='2010-01-01') select 100,100
-insert into table add_part_test PARTITION (ds='2010-02-01') select 200,200
-insert into table add_part_test PARTITION (ds='2010-03-01') select 400,300
-insert into table add_part_test PARTITION (ds='2010-04-01') select 500,400;
+insert into table add_part_test_n1 PARTITION (ds='2010-01-01') select 100,100
+insert into table add_part_test_n1 PARTITION (ds='2010-02-01') select 200,200
+insert into table add_part_test_n1 PARTITION (ds='2010-03-01') select 400,300
+insert into table add_part_test_n1 PARTITION (ds='2010-04-01') select 500,400;
 
-select * from add_part_test;
+select * from add_part_test_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/allow_change_col_type_par.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/allow_change_col_type_par.q b/ql/src/test/queries/clientpositive/allow_change_col_type_par.q
index dba4e70..aad6370 100644
--- a/ql/src/test/queries/clientpositive/allow_change_col_type_par.q
+++ b/ql/src/test/queries/clientpositive/allow_change_col_type_par.q
@@ -1,5 +1,5 @@
 -- Setup
-create table t1 (c1 int);
+create table t1_n14 (c1 int);
 
 -- Check value of parameter
 set hive.metastore.disallow.incompatible.col.type.changes;
@@ -13,4 +13,4 @@ set hive.metastore.disallow.incompatible.col.type.changes;
 set metaconf:hive.metastore.disallow.incompatible.col.type.changes;
 
 -- Change int to small int now allowed.
-alter table t1 change column c1 c1 smallint;
+alter table t1_n14 change column c1 c1 smallint;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter5.q b/ql/src/test/queries/clientpositive/alter5.q
index 66c9f8d..39aae0b 100644
--- a/ql/src/test/queries/clientpositive/alter5.q
+++ b/ql/src/test/queries/clientpositive/alter5.q
@@ -5,24 +5,24 @@
 create table alter5_src ( col1 string ) stored as textfile ;
 load data local inpath '../../data/files/test.dat' overwrite into table alter5_src ;
 
-create table alter5 ( col1 string ) partitioned by (dt string);
+create table alter5_n1 ( col1 string ) partitioned by (dt string);
 
 --
 -- Here's the interesting bit for HIVE-2117 - partition subdir should be
 -- named "parta".
 --
-alter table alter5 add partition (dt='a') location 'parta';
+alter table alter5_n1 add partition (dt='a') location 'parta';
 
-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');
 
-insert overwrite table alter5 partition (dt='a') select col1 from alter5_src ;
-select * from alter5 where dt='a';
+insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src ;
+select * from alter5_n1 where dt='a';
 
-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');
 
 -- Cleanup
 DROP TABLE alter5_src;
-DROP TABLE alter5;
+DROP TABLE alter5_n1;
 SHOW TABLES;
 
 -- With non-default Database
@@ -34,12 +34,12 @@ SHOW TABLES;
 create table alter5_src ( col1 string ) stored as textfile ;
 load data local inpath '../../data/files/test.dat' overwrite into table alter5_src ;
 
-create table alter5 ( col1 string ) partitioned by (dt string);
-alter table alter5 add partition (dt='a') location 'parta';
+create table alter5_n1 ( col1 string ) partitioned by (dt string);
+alter table alter5_n1 add partition (dt='a') location 'parta';
 
-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');
 
-insert overwrite table alter5 partition (dt='a') select col1 from alter5_src ;
-select * from alter5 where dt='a';
+insert overwrite table alter5_n1 partition (dt='a') select col1 from alter5_src ;
+select * from alter5_n1 where dt='a';
 
-describe extended alter5 partition (dt='a');
+describe extended alter5_n1 partition (dt='a');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alterColumnStats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alterColumnStats.q b/ql/src/test/queries/clientpositive/alterColumnStats.q
index 4456088..7319be7 100644
--- a/ql/src/test/queries/clientpositive/alterColumnStats.q
+++ b/ql/src/test/queries/clientpositive/alterColumnStats.q
@@ -1,21 +1,21 @@
 set hive.mapred.mode=nonstrict;
 
-drop table p;
+drop table p_n0;
 
-CREATE TABLE p(insert_num int, c1 tinyint, c2 smallint);
+CREATE TABLE p_n0(insert_num int, c1 tinyint, c2 smallint);
 
-desc formatted p;
+desc formatted p_n0;
 
-insert into p values (1,22,333);
+insert into p_n0 values (1,22,333);
 
-desc formatted p;
+desc formatted p_n0;
 
-alter table p replace columns (insert_num int, c1 STRING, c2 STRING);
+alter table p_n0 replace columns (insert_num int, c1 STRING, c2 STRING);
 
-desc formatted p;
+desc formatted p_n0;
 
-desc formatted p c1;
+desc formatted p_n0 c1;
 
-desc formatted p c2;
+desc formatted p_n0 c2;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_change_db_location.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_change_db_location.q b/ql/src/test/queries/clientpositive/alter_change_db_location.q
index a74ba2a..2033346 100644
--- a/ql/src/test/queries/clientpositive/alter_change_db_location.q
+++ b/ql/src/test/queries/clientpositive/alter_change_db_location.q
@@ -1,5 +1,5 @@
 create database newDB location "/tmp/";
 describe database extended newDB;
 use newDB;
-create table tab (name string);
-alter table tab rename to newName;
+create table tab_n13 (name string);
+alter table tab_n13 rename to newName;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_file_format.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_file_format.q b/ql/src/test/queries/clientpositive/alter_file_format.q
index eb1f6bf..54e94ae 100644
--- a/ql/src/test/queries/clientpositive/alter_file_format.q
+++ b/ql/src/test/queries/clientpositive/alter_file_format.q
@@ -19,27 +19,27 @@ desc FORMATTED alter_file_format_test;
 ALTER TABLE alter_file_format_test SET FILEFORMAT INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
 desc FORMATTED alter_file_format_test;
 
-drop table alter_partition_format_test;
+drop table alter_partition_format_test_n0;
 
 --partitioned table
-create table alter_partition_format_test (key int, value string) partitioned by (ds string);
+create table alter_partition_format_test_n0 (key int, value string) partitioned by (ds string);
 
-alter table alter_partition_format_test add partition(ds='2010');
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 add partition(ds='2010');
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');
 
-alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');
 
-alter table alter_partition_format_test partition(ds='2010') set fileformat textfile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat textfile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');
 
-alter table alter_partition_format_test partition(ds='2010') set fileformat rcfile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat rcfile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');
 
-alter table alter_partition_format_test partition(ds='2010') set fileformat sequencefile;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat sequencefile;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');
 
-alter table alter_partition_format_test partition(ds='2010') set fileformat parquet;
-desc FORMATTED alter_partition_format_test partition(ds='2010');
+alter table alter_partition_format_test_n0 partition(ds='2010') set fileformat parquet;
+desc FORMATTED alter_partition_format_test_n0 partition(ds='2010');
 
-drop table alter_partition_format_test;
\ No newline at end of file
+drop table alter_partition_format_test_n0;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_merge.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_merge.q b/ql/src/test/queries/clientpositive/alter_merge.q
index 1839005..b00ce45 100644
--- a/ql/src/test/queries/clientpositive/alter_merge.q
+++ b/ql/src/test/queries/clientpositive/alter_merge.q
@@ -1,44 +1,44 @@
 set hive.strict.checks.bucketing=false;
 
 set hive.mapred.mode=nonstrict;
-create table src_rc_merge_test(key int, value string) stored as rcfile;
+create table src_rc_merge_test_n2(key int, value string) stored as rcfile;
 
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test;
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test;
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n2;
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_n2;
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_n2;
 
-show table extended like `src_rc_merge_test`;
+show table extended like `src_rc_merge_test_n2`;
 
-select count(1) from src_rc_merge_test;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
+select count(1) from src_rc_merge_test_n2;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2;
 
-alter table src_rc_merge_test concatenate;
+alter table src_rc_merge_test_n2 concatenate;
 
-show table extended like `src_rc_merge_test`;
+show table extended like `src_rc_merge_test_n2`;
 
-select count(1) from src_rc_merge_test;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test;
+select count(1) from src_rc_merge_test_n2;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_n2;
 
 
-create table src_rc_merge_test_part(key int, value string) partitioned by (ds string) stored as rcfile;
+create table src_rc_merge_test_part_n0(key int, value string) partitioned by (ds string) stored as rcfile;
 
-alter table src_rc_merge_test_part add partition (ds='2011');
+alter table src_rc_merge_test_part_n0 add partition (ds='2011');
 
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011');
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011');
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n0 partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_n0 partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n0 partition (ds='2011');
 
-show table extended like `src_rc_merge_test_part` partition (ds='2011');
+show table extended like `src_rc_merge_test_part_n0` partition (ds='2011');
 
-select count(1) from src_rc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
+select count(1) from src_rc_merge_test_part_n0;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0;
 
-alter table src_rc_merge_test_part partition (ds='2011') concatenate;
+alter table src_rc_merge_test_part_n0 partition (ds='2011') concatenate;
 
-show table extended like `src_rc_merge_test_part` partition (ds='2011');
+show table extended like `src_rc_merge_test_part_n0` partition (ds='2011');
 
-select count(1) from src_rc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part;
+select count(1) from src_rc_merge_test_part_n0;
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n0;
 
-drop table src_rc_merge_test;
-drop table src_rc_merge_test_part;
\ No newline at end of file
+drop table src_rc_merge_test_n2;
+drop table src_rc_merge_test_part_n0;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_merge_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_merge_2.q b/ql/src/test/queries/clientpositive/alter_merge_2.q
index b0dd56c..8d36fdf 100644
--- a/ql/src/test/queries/clientpositive/alter_merge_2.q
+++ b/ql/src/test/queries/clientpositive/alter_merge_2.q
@@ -1,21 +1,21 @@
 set hive.strict.checks.bucketing=false;
 
-create table src_rc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as rcfile;
+create table src_rc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as rcfile;
 
-alter table src_rc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-desc extended src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+alter table src_rc_merge_test_part_n1 add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+desc extended src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
 
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
 
-select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
 
-alter table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;
+alter table src_rc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;
 
 
-select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select count(1) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
 
-drop table src_rc_merge_test_part;
+drop table src_rc_merge_test_part_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_merge_orc.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_merge_orc.q b/ql/src/test/queries/clientpositive/alter_merge_orc.q
index aac98a8..0768a1b 100644
--- a/ql/src/test/queries/clientpositive/alter_merge_orc.q
+++ b/ql/src/test/queries/clientpositive/alter_merge_orc.q
@@ -19,25 +19,25 @@ select count(1) from src_orc_merge_test;
 select sum(hash(key)), sum(hash(value)) from src_orc_merge_test;
 
 
-create table src_orc_merge_test_part(key int, value string) partitioned by (ds string) stored as orc;
+create table src_orc_merge_test_part_n2(key int, value string) partitioned by (ds string) stored as orc;
 
-alter table src_orc_merge_test_part add partition (ds='2011');
+alter table src_orc_merge_test_part_n2 add partition (ds='2011');
 
-insert overwrite table src_orc_merge_test_part partition (ds='2011') select * from src;
-insert into table src_orc_merge_test_part partition (ds='2011') select * from src;
-insert into table src_orc_merge_test_part partition (ds='2011') select * from src;
+insert overwrite table src_orc_merge_test_part_n2 partition (ds='2011') select * from src;
+insert into table src_orc_merge_test_part_n2 partition (ds='2011') select * from src;
+insert into table src_orc_merge_test_part_n2 partition (ds='2011') select * from src;
 
-show table extended like `src_orc_merge_test_part` partition (ds='2011');
+show table extended like `src_orc_merge_test_part_n2` partition (ds='2011');
 
-select count(1) from src_orc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part;
+select count(1) from src_orc_merge_test_part_n2;
+select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n2;
 
-alter table src_orc_merge_test_part partition (ds='2011') concatenate;
+alter table src_orc_merge_test_part_n2 partition (ds='2011') concatenate;
 
-show table extended like `src_orc_merge_test_part` partition (ds='2011');
+show table extended like `src_orc_merge_test_part_n2` partition (ds='2011');
 
-select count(1) from src_orc_merge_test_part;
-select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part;
+select count(1) from src_orc_merge_test_part_n2;
+select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n2;
 
 drop table src_orc_merge_test;
-drop table src_orc_merge_test_part;
+drop table src_orc_merge_test_part_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
index 15a88bb..ca2dd49 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
@@ -1,84 +1,84 @@
 -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
 -- the partition metadata is updated as well.
 
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE tst1_n0(key STRING, value STRING) PARTITIONED BY (ds STRING);
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
 
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test an unbucketed partition gets converted to bucketed
-ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (key) INTO 8 BUCKETS;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test an unsorted partition gets converted to sorted
-ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test changing the bucket columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test changing the number of buckets
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test changing the sort columns
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test changing the sort order
-ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test a sorted partition gets converted to unsorted
-ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS;
+ALTER TABLE tst1_n0 CLUSTERED BY (value) INTO 4 BUCKETS;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');
 
 -- Test a bucketed partition gets converted to unbucketed
-ALTER TABLE tst1 NOT CLUSTERED;
+ALTER TABLE tst1_n0 NOT CLUSTERED;
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n0;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n0 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n0 PARTITION (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
index 7d523d9..33b8ccc 100644
--- a/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
+++ b/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -1,59 +1,59 @@
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
-create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
+create table tst1_n1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
 
-alter table tst1 clustered by (key) into 8 buckets;
+alter table tst1_n1 clustered by (key) into 8 buckets;
 
-describe formatted tst1;
+describe formatted tst1_n1;
 
 
-insert overwrite table tst1 partition (ds='1') select key, value from src;
+insert overwrite table tst1_n1 partition (ds='1') select key, value from src;
 
-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');
 
 -- Test changing bucket number
 
-alter table tst1 clustered by (key) into 12 buckets;
+alter table tst1_n1 clustered by (key) into 12 buckets;
 
-insert overwrite table tst1 partition (ds='1') select key, value from src;
+insert overwrite table tst1_n1 partition (ds='1') select key, value from src;
 
-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');
 
-describe formatted tst1;
+describe formatted tst1_n1;
 
 -- Test changing bucket number of (table/partition)
 
-alter table tst1 into 4 buckets;
+alter table tst1_n1 into 4 buckets;
 
-describe formatted tst1;
+describe formatted tst1_n1;
 
-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');
 
-alter table tst1 partition (ds = '1') into 6 buckets;
+alter table tst1_n1 partition (ds = '1') into 6 buckets;
 
-describe formatted tst1;
+describe formatted tst1_n1;
 
-describe formatted tst1 partition (ds = '1');
+describe formatted tst1_n1 partition (ds = '1');
 
 -- Test adding sort order
 
-alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
+alter table tst1_n1 clustered by (key) sorted by (key asc) into 12 buckets;
 
-describe formatted tst1;
+describe formatted tst1_n1;
 
 -- Test changing sort order
 
-alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
+alter table tst1_n1 clustered by (key) sorted by (value desc) into 12 buckets;
 
-describe formatted tst1;
+describe formatted tst1_n1;
 
 -- Test removing test order
 
-alter table tst1 clustered by (value) into 12 buckets;
+alter table tst1_n1 clustered by (value) into 12 buckets;
 
-describe formatted tst1;
+describe formatted tst1_n1;
 
 -- Test removing buckets
 
-alter table tst1 not clustered;
+alter table tst1_n1 not clustered;
 
-describe formatted tst1;
+describe formatted tst1_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q b/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q
index 301362a..a0c695f 100644
--- a/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q
+++ b/ql/src/test/queries/clientpositive/alter_partition_with_whitelist.q
@@ -1,9 +1,9 @@
 SET hive.metastore.partition.name.whitelist.pattern=[A-Za-z]*;
 -- This pattern matches only letters.
 
-CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING);
-SHOW PARTITIONS part_whitelist_test;
+CREATE TABLE part_whitelist_test_n0 (key STRING, value STRING) PARTITIONED BY (ds STRING);
+SHOW PARTITIONS part_whitelist_test_n0;
 
-ALTER TABLE part_whitelist_test ADD PARTITION (ds='Part');
+ALTER TABLE part_whitelist_test_n0 ADD PARTITION (ds='Part');
 
-ALTER TABLE part_whitelist_test PARTITION (ds='Part') rename to partition (ds='Apart');     
+ALTER TABLE part_whitelist_test_n0 PARTITION (ds='Part') rename to partition (ds='Apart');     

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q b/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q
index 70f2bb4..076c7c3 100644
--- a/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q
+++ b/ql/src/test/queries/clientpositive/alter_rename_partition_authorization.q
@@ -1,23 +1,23 @@
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
 -- SORT_BEFORE_DIFF
 
-create table src_auth_tmp as select * from src;
+create table src_auth_tmp_n1 as select * from src;
 
-create table authorization_part (key int, value string) partitioned by (ds string);
-ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE");
+create table authorization_part_n1 (key int, value string) partitioned by (ds string);
+ALTER TABLE authorization_part_n1 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE");
 set hive.security.authorization.enabled=true;
-grant select on table src_auth_tmp to user hive_test_user;
+grant select on table src_auth_tmp_n1 to user hive_test_user;
 
 -- column grant to user
-grant Create on table authorization_part to user hive_test_user;
-grant Update on table authorization_part to user hive_test_user;
-grant Drop on table authorization_part to user hive_test_user;
+grant Create on table authorization_part_n1 to user hive_test_user;
+grant Update on table authorization_part_n1 to user hive_test_user;
+grant Drop on table authorization_part_n1 to user hive_test_user;
 
-show grant user hive_test_user on table authorization_part;
-grant select(key) on table authorization_part to user hive_test_user;
-insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp; 
-show grant user hive_test_user on table authorization_part(key) partition (ds='2010');
-alter table authorization_part partition (ds='2010') rename to partition (ds='2010_tmp');
-show grant user hive_test_user on table authorization_part(key) partition (ds='2010_tmp');
+show grant user hive_test_user on table authorization_part_n1;
+grant select(key) on table authorization_part_n1 to user hive_test_user;
+insert overwrite table authorization_part_n1 partition (ds='2010') select key, value from src_auth_tmp_n1; 
+show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010');
+alter table authorization_part_n1 partition (ds='2010') rename to partition (ds='2010_tmp');
+show grant user hive_test_user on table authorization_part_n1(key) partition (ds='2010_tmp');
 
-drop table authorization_part;
+drop table authorization_part_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_table_serde.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_table_serde.q b/ql/src/test/queries/clientpositive/alter_table_serde.q
index a80693e..3523f21 100644
--- a/ql/src/test/queries/clientpositive/alter_table_serde.q
+++ b/ql/src/test/queries/clientpositive/alter_table_serde.q
@@ -1,33 +1,33 @@
 -- test table
-create table test_table (id int, query string, name string);
-describe extended test_table;
+create table test_table_n1 (id int, query string, name string);
+describe extended test_table_n1;
 
-alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-describe extended test_table;
+alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+describe extended test_table_n1;
 
-alter table test_table set serdeproperties ('field.delim' = ',');
-describe extended test_table;
+alter table test_table_n1 set serdeproperties ('field.delim' = ',');
+describe extended test_table_n1;
 
-drop table test_table;
+drop table test_table_n1;
 
 --- test partitioned table
-create table test_table (id int, query string, name string) partitioned by (dt string);
+create table test_table_n1 (id int, query string, name string) partitioned by (dt string);
 
-alter table test_table add partition (dt = '2011');
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 add partition (dt = '2011');
+describe extended test_table_n1 partition (dt='2011');
 
-alter table test_table set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+describe extended test_table_n1 partition (dt='2011');
 
-alter table test_table set serdeproperties ('field.delim' = ',');
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 set serdeproperties ('field.delim' = ',');
+describe extended test_table_n1 partition (dt='2011');
 
 -- test partitions
 
-alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+describe extended test_table_n1 partition (dt='2011');
 
-alter table test_table partition(dt='2011') set serdeproperties ('field.delim' = ',');
-describe extended test_table partition (dt='2011');
+alter table test_table_n1 partition(dt='2011') set serdeproperties ('field.delim' = ',');
+describe extended test_table_n1 partition (dt='2011');
 
-drop table test_table
+drop table test_table_n1

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_table_serde2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_table_serde2.q b/ql/src/test/queries/clientpositive/alter_table_serde2.q
index 6ca3be3..2826123 100644
--- a/ql/src/test/queries/clientpositive/alter_table_serde2.q
+++ b/ql/src/test/queries/clientpositive/alter_table_serde2.q
@@ -1,20 +1,20 @@
 -- Tests that when overwriting a partition in a table after altering the serde properties
 -- the partition metadata is updated as well.
 
-CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
+CREATE TABLE tst1_n5(key STRING, value STRING) PARTITIONED BY (ds STRING);
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n5;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1');
 
 -- Test altering the serde properties
 
-ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',');
+ALTER TABLE tst1_n5 SET SERDEPROPERTIES ('field.delim' = ',');
 
-DESCRIBE FORMATTED tst1;
+DESCRIBE FORMATTED tst1_n5;
 
-INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+INSERT OVERWRITE TABLE tst1_n5 PARTITION (ds = '1') SELECT key, value FROM src;
 
-DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+DESCRIBE FORMATTED tst1_n5 PARTITION (ds = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_table_stats_status.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_table_stats_status.q b/ql/src/test/queries/clientpositive/alter_table_stats_status.q
index 8e07b81..a880df5 100644
--- a/ql/src/test/queries/clientpositive/alter_table_stats_status.q
+++ b/ql/src/test/queries/clientpositive/alter_table_stats_status.q
@@ -24,25 +24,25 @@ describe formatted statstable;
 
 drop table statstable;
 
-create table srcpart like default.srcpart;
-load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart partition (ds='2008-04-08', hr='11');
-load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart partition (ds='2008-04-08', hr='12');
+create table srcpart_n0 like default.srcpart;
+load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart_n0 partition (ds='2008-04-08', hr='11');
+load data local inpath '../../data/files/kv1.txt' overwrite into table srcpart_n0 partition (ds='2008-04-08', hr='12');
 
-analyze table srcpart partition (ds='2008-04-08', hr='11') compute statistics;
-describe formatted srcpart partition (ds='2008-04-08', hr='11');
+analyze table srcpart_n0 partition (ds='2008-04-08', hr='11') compute statistics;
+describe formatted srcpart_n0 partition (ds='2008-04-08', hr='11');
 
-alter table srcpart touch;
-alter table srcpart partition (ds='2008-04-08', hr='11') rename to partition (ds='2017-01-19', hr='11');
-alter table srcpart partition (ds='2017-01-19', hr='11') add columns (newcol string);
-alter table srcpart partition (ds='2017-01-19', hr='11') change key key string;
-alter table srcpart set tblproperties('testpartstats'='unchange');
-describe formatted srcpart partition (ds='2017-01-19', hr='11');
+alter table srcpart_n0 touch;
+alter table srcpart_n0 partition (ds='2008-04-08', hr='11') rename to partition (ds='2017-01-19', hr='11');
+alter table srcpart_n0 partition (ds='2017-01-19', hr='11') add columns (newcol string);
+alter table srcpart_n0 partition (ds='2017-01-19', hr='11') change key key string;
+alter table srcpart_n0 set tblproperties('testpartstats'='unchange');
+describe formatted srcpart_n0 partition (ds='2017-01-19', hr='11');
 
-alter table srcpart partition (ds='2017-01-19', hr='11') update statistics set ('numRows' = '1000');
-describe formatted srcpart partition (ds='2017-01-19', hr='11');
+alter table srcpart_n0 partition (ds='2017-01-19', hr='11') update statistics set ('numRows' = '1000');
+describe formatted srcpart_n0 partition (ds='2017-01-19', hr='11');
 
-analyze table srcpart partition (ds='2017-01-19', hr='11') compute statistics;
-describe formatted srcpart partition (ds='2017-01-19', hr='11');
+analyze table srcpart_n0 partition (ds='2017-01-19', hr='11') compute statistics;
+describe formatted srcpart_n0 partition (ds='2017-01-19', hr='11');
 
-drop table srcpart;
+drop table srcpart_n0;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_table_update_status.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_table_update_status.q b/ql/src/test/queries/clientpositive/alter_table_update_status.q
index 9415c97..2cace7d 100644
--- a/ql/src/test/queries/clientpositive/alter_table_update_status.q
+++ b/ql/src/test/queries/clientpositive/alter_table_update_status.q
@@ -1,31 +1,32 @@
-create table src_stat as select * from src1;
+--! qt:dataset:src1
+create table src_stat_n0 as select * from src1;
 
-create table src_stat_int (
+create table src_stat_int_n0 (
   key         double,
   value       string
 );
 
-LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int;
+LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE src_stat_int_n0;
 
-ANALYZE TABLE src_stat COMPUTE STATISTICS for columns key;
+ANALYZE TABLE src_stat_n0 COMPUTE STATISTICS for columns key;
 
-describe formatted src_stat key;
+describe formatted src_stat_n0 key;
 
-ALTER TABLE src_stat UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111');
+ALTER TABLE src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111');
 
-describe formatted src_stat key;
+describe formatted src_stat_n0 key;
 
-ALTER TABLE src_stat UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124');
+ALTER TABLE src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124');
 
-describe formatted src_stat value;
+describe formatted src_stat_n0 value;
 
-ANALYZE TABLE src_stat_int COMPUTE STATISTICS for columns key;
+ANALYZE TABLE src_stat_int_n0 COMPUTE STATISTICS for columns key;
 
-describe formatted src_stat_int key;
+describe formatted src_stat_int_n0 key;
 
-ALTER TABLE src_stat_int UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22');
+ALTER TABLE src_stat_int_n0 UPDATE STATISTICS for column key SET ('numDVs'='2222','lowValue'='333.22','highValue'='22.22');
 
-describe formatted src_stat_int key;
+describe formatted src_stat_int_n0 key;
 
 
 
@@ -33,19 +34,19 @@ create database if not exists dummydb;
 
 use dummydb;
 
-ALTER TABLE default.src_stat UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222');
+ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='3333','avgColLen'='2.222');
 
-describe formatted default.src_stat key;
+describe formatted default.src_stat_n0 key;
 
-ALTER TABLE default.src_stat UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
+ALTER TABLE default.src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
 
-describe formatted default.src_stat value;
+describe formatted default.src_stat_n0 value;
 
 use default;
 
 drop database dummydb;
 
-create table datatype_stats(
+create table datatype_stats_n0(
         t TINYINT,
         s SMALLINT,
         i INT,
@@ -61,77 +62,77 @@ create table datatype_stats(
         bl BOOLEAN,
         bin BINARY);
 
-INSERT INTO datatype_stats values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin');
-INSERT INTO datatype_stats values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-DESC FORMATTED datatype_stats s;
-DESC FORMATTED datatype_stats i;
-DESC FORMATTED datatype_stats b;
-DESC FORMATTED datatype_stats f;
-DESC FORMATTED datatype_stats d;
-DESC FORMATTED datatype_stats dem;
-DESC FORMATTED datatype_stats ts;
-DESC FORMATTED datatype_stats dt;
-DESC FORMATTED datatype_stats str;
-DESC FORMATTED datatype_stats v;
-DESC FORMATTED datatype_stats c;
-DESC FORMATTED datatype_stats bl;
-DESC FORMATTED datatype_stats bin;
+INSERT INTO datatype_stats_n0 values(2, 3, 45, 456, 45454.4, 454.6565, 2355, '2012-01-01 01:02:03', '2012-01-01', 'update_statistics', 'stats', 'hive', 'true', 'bin');
+INSERT INTO datatype_stats_n0 values(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+DESC FORMATTED datatype_stats_n0 s;
+DESC FORMATTED datatype_stats_n0 i;
+DESC FORMATTED datatype_stats_n0 b;
+DESC FORMATTED datatype_stats_n0 f;
+DESC FORMATTED datatype_stats_n0 d;
+DESC FORMATTED datatype_stats_n0 dem;
+DESC FORMATTED datatype_stats_n0 ts;
+DESC FORMATTED datatype_stats_n0 dt;
+DESC FORMATTED datatype_stats_n0 str;
+DESC FORMATTED datatype_stats_n0 v;
+DESC FORMATTED datatype_stats_n0 c;
+DESC FORMATTED datatype_stats_n0 bl;
+DESC FORMATTED datatype_stats_n0 bin;
 
 --tinyint
-DESC FORMATTED datatype_stats t;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35');
-DESC FORMATTED datatype_stats t;
+DESC FORMATTED datatype_stats_n0 t;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column t SET ('numDVs'='232','numNulls'='233','highValue'='234','lowValue'='35');
+DESC FORMATTED datatype_stats_n0 t;
 --smallint
-DESC FORMATTED datatype_stats s;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25');
-DESC FORMATTED datatype_stats s;
+DESC FORMATTED datatype_stats_n0 s;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column s SET ('numDVs'='56','numNulls'='56','highValue'='489','lowValue'='25');
+DESC FORMATTED datatype_stats_n0 s;
 --int
-DESC FORMATTED datatype_stats i;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5');
-DESC FORMATTED datatype_stats i;
+DESC FORMATTED datatype_stats_n0 i;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column i SET ('numDVs'='59','numNulls'='1','highValue'='889','lowValue'='5');
+DESC FORMATTED datatype_stats_n0 i;
 --bigint
-DESC FORMATTED datatype_stats b;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8');
-DESC FORMATTED datatype_stats b;
+DESC FORMATTED datatype_stats_n0 b;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column b SET ('numDVs'='9','numNulls'='14','highValue'='89','lowValue'='8');
+DESC FORMATTED datatype_stats_n0 b;
 
 --float
-DESC FORMATTED datatype_stats f;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00');
-DESC FORMATTED datatype_stats f;
+DESC FORMATTED datatype_stats_n0 f;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column f SET ('numDVs'='563','numNulls'='45','highValue'='2345.656','lowValue'='8.00');
+DESC FORMATTED datatype_stats_n0 f;
 --double
-DESC FORMATTED datatype_stats d;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455');
-DESC FORMATTED datatype_stats d;
+DESC FORMATTED datatype_stats_n0 d;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column d SET ('numDVs'='5677','numNulls'='12','highValue'='560.3367','lowValue'='0.00455');
+DESC FORMATTED datatype_stats_n0 d;
 --decimal
-DESC FORMATTED datatype_stats dem;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0');
-DESC FORMATTED datatype_stats dem;
+DESC FORMATTED datatype_stats_n0 dem;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dem SET ('numDVs'='57','numNulls'='912','highValue'='560','lowValue'='0');
+DESC FORMATTED datatype_stats_n0 dem;
 --timestamp
-DESC FORMATTED datatype_stats ts;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924');
-DESC FORMATTED datatype_stats ts;
+DESC FORMATTED datatype_stats_n0 ts;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column ts SET ('numDVs'='7','numNulls'='12','highValue'='1357030923','lowValue'='1357030924');
+DESC FORMATTED datatype_stats_n0 ts;
 --date
-DESC FORMATTED datatype_stats dt;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04');
-DESC FORMATTED datatype_stats dt;
+DESC FORMATTED datatype_stats_n0 dt;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column dt SET ('numDVs'='57','numNulls'='912','highValue'='2012-01-01','lowValue'='2001-02-04');
+DESC FORMATTED datatype_stats_n0 dt;
 --string
-DESC FORMATTED datatype_stats str;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
-DESC FORMATTED datatype_stats str;
+DESC FORMATTED datatype_stats_n0 str;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column str SET ('numDVs'='232','numNulls'='233','avgColLen'='2.34','maxColLen'='235');
+DESC FORMATTED datatype_stats_n0 str;
 --varchar
-DESC FORMATTED datatype_stats v;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25');
-DESC FORMATTED datatype_stats v;
+DESC FORMATTED datatype_stats_n0 v;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column v SET ('numDVs'='22','numNulls'='33','avgColLen'='4.40','maxColLen'='25');
+DESC FORMATTED datatype_stats_n0 v;
 --char
-DESC FORMATTED datatype_stats c;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58');
-DESC FORMATTED datatype_stats c;
+DESC FORMATTED datatype_stats_n0 c;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column c SET ('numDVs'='2','numNulls'='03','avgColLen'='9.00','maxColLen'='58');
+DESC FORMATTED datatype_stats_n0 c;
 --boolean
-DESC FORMATTED datatype_stats bl;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8');
-DESC FORMATTED datatype_stats bl;
+DESC FORMATTED datatype_stats_n0 bl;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bl SET ('numNulls'='1','numTrues'='9','numFalses'='8');
+DESC FORMATTED datatype_stats_n0 bl;
 --binary
-DESC FORMATTED datatype_stats bin;
-ALTER TABLE default.datatype_stats UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8');
-DESC FORMATTED datatype_stats bin;
+DESC FORMATTED datatype_stats_n0 bin;
+ALTER TABLE default.datatype_stats_n0 UPDATE STATISTICS for column bin SET ('numNulls'='8','avgColLen'='2.0','maxColLen'='8');
+DESC FORMATTED datatype_stats_n0 bin;
 

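The --! qt:dataset:src1 header added above is a QTest dataset directive: it declares the shared input tables a script reads so the test driver can provision them before the script runs (a hedged reading; the directive's semantics are not shown in this patch). A minimal sketch of a self-contained .q file using it (hypothetical test, not part of this patch):

  --! qt:dataset:src1
  -- declare the shared dataset, then build on it with a suffixed table name
  create table example_n0 as select * from src1;
  select count(*) from example_n0;
  drop table example_n0;
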
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/alter_view_rename.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_view_rename.q b/ql/src/test/queries/clientpositive/alter_view_rename.q
index 598a942..95c2912 100644
--- a/ql/src/test/queries/clientpositive/alter_view_rename.q
+++ b/ql/src/test/queries/clientpositive/alter_view_rename.q
@@ -2,15 +2,15 @@ set hive.mapred.mode=nonstrict;
 CREATE DATABASE tv1;
 CREATE DATABASE tv2;
 
-CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING);
-CREATE VIEW tv1.view1 as SELECT * FROM invites;
+CREATE TABLE invites_n1 (foo INT, bar STRING) PARTITIONED BY (ds STRING);
+CREATE VIEW tv1.view1 as SELECT * FROM invites_n1;
 DESCRIBE EXTENDED tv1.view1;
 
 ALTER VIEW tv1.view1 RENAME TO tv2.view2;
 DESCRIBE EXTENDED tv2.view2;
 SELECT * FROM tv2.view2;
 
-DROP TABLE invites;
+DROP TABLE invites_n1;
 DROP VIEW tv2.view2;
 
 DROP DATABASE tv1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/analyze_table_null_partition.q b/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
index 2f7a893..4f2b947 100644
--- a/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
+++ b/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
@@ -1,21 +1,21 @@
 SET hive.exec.dynamic.partition.mode=nonstrict;
 
-DROP TABLE IF EXISTS test1;
-DROP TABLE IF EXISTS test2;
+DROP TABLE IF EXISTS test1_n9;
+DROP TABLE IF EXISTS test2_n6;
 
-CREATE TABLE test1(name string, age int);
-CREATE TABLE test2(name string) PARTITIONED by (age int);
+CREATE TABLE test1_n9(name string, age int);
+CREATE TABLE test2_n6(name string) PARTITIONED by (age int);
 
-LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1;
-FROM test1 INSERT OVERWRITE TABLE test2 PARTITION(age) SELECT test1.name, test1.age;
+LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1_n9;
+FROM test1_n9 INSERT OVERWRITE TABLE test2_n6 PARTITION(age) SELECT test1_n9.name, test1_n9.age;
 
-ANALYZE TABLE test2 PARTITION(age) COMPUTE STATISTICS;
+ANALYZE TABLE test2_n6 PARTITION(age) COMPUTE STATISTICS;
 
 -- To show stats. The stats don't show due to a bug.
-DESC EXTENDED test2;
+DESC EXTENDED test2_n6;
 
 -- Another way to show stats.
-EXPLAIN EXTENDED select * from test2;
+EXPLAIN EXTENDED select * from test2_n6;
 
-DROP TABLE test1;
-DROP TABLE test2;
+DROP TABLE test1_n9;
+DROP TABLE test2_n6;

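A note on the NULL-partition case this test exercises (a sketch, assuming default configuration): rows whose dynamic partition key evaluates to NULL are stored under the fallback partition named by hive.exec.default.partition.name, __HIVE_DEFAULT_PARTITION__ by default, which is the partition ANALYZE ... PARTITION(age) has to handle. A hypothetical illustration, not part of this patch:

  SET hive.exec.dynamic.partition.mode=nonstrict;
  CREATE TABLE part_demo_n0 (name string) PARTITIONED BY (age int);
  -- the dynamic partition column comes last in VALUES; NULL lands in the default partition
  INSERT INTO TABLE part_demo_n0 PARTITION (age) VALUES ('amy', 35), ('bob', NULL);
  SHOW PARTITIONS part_demo_n0;   -- expect age=35 and age=__HIVE_DEFAULT_PARTITION__
  DROP TABLE part_demo_n0;
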
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/analyze_tbl_date.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/analyze_tbl_date.q b/ql/src/test/queries/clientpositive/analyze_tbl_date.q
index 6726b83..9132ea2 100644
--- a/ql/src/test/queries/clientpositive/analyze_tbl_date.q
+++ b/ql/src/test/queries/clientpositive/analyze_tbl_date.q
@@ -1,14 +1,14 @@
 set hive.fetch.task.conversion=none;
 
-create table test_table(d date);
+create table test_table_n7(d date);
 
-insert into test_table values(null), (null), (null);
+insert into test_table_n7 values(null), (null), (null);
 
-analyze table test_table compute statistics for columns;
+analyze table test_table_n7 compute statistics for columns;
 
-describe formatted test_table;
+describe formatted test_table_n7;
 
-explain select * from test_table where d is not null;
+explain select * from test_table_n7 where d is not null;
 
-select * from test_table where d is not null;
+select * from test_table_n7 where d is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/analyze_tbl_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/analyze_tbl_part.q b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
index e8010bd..e2d5351 100644
--- a/ql/src/test/queries/clientpositive/analyze_tbl_part.q
+++ b/ql/src/test/queries/clientpositive/analyze_tbl_part.q
@@ -1,21 +1,21 @@
 set hive.mapred.mode=nonstrict;
-create table src_stat_part(key string, value string) partitioned by (partitionId int);
+create table src_stat_part_n1(key string, value string) partitioned by (partitionId int);
 
-insert overwrite table src_stat_part partition (partitionId=1)
+insert overwrite table src_stat_part_n1 partition (partitionId=1)
 select * from src1;
 
-insert overwrite table src_stat_part partition (partitionId=2)
+insert overwrite table src_stat_part_n1 partition (partitionId=2)
 select * from src1;
 
-ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key;
+ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key;
 
-describe formatted src_stat_part PARTITION(partitionId=1) key;
+describe formatted src_stat_part_n1 PARTITION(partitionId=1) key;
 
-ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value;
+ANALYZE TABLE src_stat_part_n1 partition (partitionId) COMPUTE STATISTICS for columns key, value;
 
-describe formatted src_stat_part PARTITION(partitionId=1) key;
+describe formatted src_stat_part_n1 PARTITION(partitionId=1) key;
 
-describe formatted src_stat_part PARTITION(partitionId=2) value;
+describe formatted src_stat_part_n1 PARTITION(partitionId=2) value;
 
 create table src_stat_string_part(key string, value string) partitioned by (partitionName string);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q b/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
index dd5b9dd..a402444 100644
--- a/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
+++ b/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
@@ -1,4 +1,4 @@
-create table over1k(
+create table over1k_n4(
 t tinyint,
 si smallint,
 i int,
@@ -13,15 +13,15 @@ bin binary)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-load data local inpath '../../data/files/over1k' overwrite into table over1k;
-load data local inpath '../../data/files/over1k' into table over1k;
+load data local inpath '../../data/files/over1k' overwrite into table over1k_n4;
+load data local inpath '../../data/files/over1k' into table over1k_n4;
 
-analyze table over1k compute statistics;
-analyze table over1k compute statistics for columns;
+analyze table over1k_n4 compute statistics;
+analyze table over1k_n4 compute statistics for columns;
 
 set hive.stats.fetch.column.stats=true;
 set hive.optimize.point.lookup=false;
-explain select count(*) from over1k where (
+explain select count(*) from over1k_n4 where (
 (t=1 and si=2)
 or (t=2 and si=3)
 or (t=3 and si=4) 
@@ -44,7 +44,7 @@ or (t=47 and si=48)
 or (t=52 and si=53));
 
 set hive.stats.fetch.column.stats=false;
-explain select count(*) from over1k where (
+explain select count(*) from over1k_n4 where (
 (t=1 and si=2)
 or (t=2 and si=3)
 or (t=3 and si=4) 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_groupby.q b/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
index 77571cf..081f057 100644
--- a/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
+++ b/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
@@ -21,69 +21,69 @@ set hive.map.aggr.hash.percentmemory=0.0f;
 -- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
 -- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
 
-create table if not exists loc_staging (
+create table if not exists loc_staging_n2 (
   state string,
   locid int,
   zip bigint,
   year int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table loc_orc like loc_staging;
-alter table loc_orc set fileformat orc;
+create table loc_orc_n2 like loc_staging_n2;
+alter table loc_orc_n2 set fileformat orc;
 
-load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n2;
 
-insert overwrite table loc_orc select * from loc_staging;
+insert overwrite table loc_orc_n2 select * from loc_staging_n2;
 
 -- numRows: 8 rawDataSize: 796
-explain select * from loc_orc;
+explain select * from loc_orc_n2;
 
 -- partial column stats
-analyze table loc_orc compute statistics for columns state;
+analyze table loc_orc_n2 compute statistics for columns state;
 
 -- inner group by: map - numRows: 8 reduce - numRows: 4
 -- outer group by: map - numRows: 4 reduce numRows: 2
 explain select a, c, min(b)
 from ( select state as a, locid as b, count(*) as c
-       from loc_orc
+       from loc_orc_n2
        group by state,locid
      ) sq1
 group by a,c;
 
-analyze table loc_orc compute statistics for columns state,locid,year;
+analyze table loc_orc_n2 compute statistics for columns state,locid,year;
 
 -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n2 group by year;
 
 -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 9: column stats, NO grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid;
+explain select state,locid from loc_orc_n2 group by state,locid;
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
 -- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n2 group by state,locid with cube;
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
 -- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid with rollup;
-explain select state,locid from loc_orc group by rollup( state,locid );
+explain select state,locid from loc_orc_n2 group by state,locid with rollup;
+explain select state,locid from loc_orc_n2 group by rollup( state,locid );
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
 -- Case 8: column stats, grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid grouping sets((state));
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state));
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
 -- Case 8: column stats, grouping sets - cardinality = 16
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid));
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
 -- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),());
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
 -- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),());
 
 set hive.map.aggr.hash.percentmemory=0.5f;
 set mapred.max.split.size=80;
@@ -91,52 +91,52 @@ set mapred.max.split.size=80;
 
 -- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
 -- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n2 group by year;
 
 -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
 -- Case 8: column stats, grouping sets - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n2 group by state,locid with cube;
 
 -- ndvProduct becomes 0 as zip does not have column stats
 -- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
 -- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select state,zip from loc_orc group by state,zip;
+explain select state,zip from loc_orc_n2 group by state,zip;
 
 set mapred.max.split.size=1000;
 set hive.stats.fetch.column.stats=false;
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
 -- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n2 group by state,locid with cube;
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
 -- Case 7: NO column stats - cardinality = 12
-explain select state,locid from loc_orc group by state,locid with rollup;
-explain select state,locid from loc_orc group by rollup (state,locid);
+explain select state,locid from loc_orc_n2 group by state,locid with rollup;
+explain select state,locid from loc_orc_n2 group by rollup (state,locid);
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 7: NO column stats - cardinality = 4
-explain select state,locid from loc_orc group by state,locid grouping sets((state));
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state));
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16
 -- Case 7: NO column stats - cardinality = 8
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid));
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24
 -- Case 7: NO column stats - cardinality = 12
-explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state),(locid),());
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
 -- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc_n2 group by state,locid grouping sets((state,locid),(state),(locid),());
 
 set mapred.max.split.size=80;
 
 -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
 -- Case 7: NO column stats - cardinality = 4
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n2 group by year;
 
 -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32
 -- Case 7: NO column stats - cardinality = 16
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n2 group by state,locid with cube;
 

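A worked instance of the Case 9 estimate the comments above encode (a sketch derived from the numbers in this script, not from the optimizer source): loc_orc_n2 has numRows = 8, and the comments imply ndv(year) = 2, so for

  explain select year from loc_orc_n2 group by year;

the reduce-side estimate is Min(numRows, ndvProduct) = Min(8, 2) = 2. With grouping sets the map side first multiplies rows by the number of grouping sets; a cube over (state, locid) yields 8 * 4 = 32 rows, and the Case 8 cap Min(numRows, ndvProduct * sizeOfGroupingSet) then applies to that expanded count, which matches the cardinalities of 32 (no hash aggregation) and 16 (hash aggregation) annotated above.
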
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/annotate_stats_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_join.q b/ql/src/test/queries/clientpositive/annotate_stats_join.q
index 015c647..4d24324 100644
--- a/ql/src/test/queries/clientpositive/annotate_stats_join.q
+++ b/ql/src/test/queries/clientpositive/annotate_stats_join.q
@@ -1,13 +1,13 @@
 set hive.stats.fetch.column.stats=true;
 set hive.stats.ndv.error=0.0;
 
-create table if not exists emp (
+create table if not exists emp_n2 (
   lastname string,
   deptid int,
   locid int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table if not exists dept (
+create table if not exists dept_n1 (
   deptid int,
   deptname string
 ) row format delimited fields terminated by '|' stored as textfile;
@@ -19,63 +19,63 @@ create table if not exists loc (
   year int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp;
-LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept;
+LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_n2;
+LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept_n1;
 LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc;
 
-analyze table emp compute statistics;
-analyze table dept compute statistics;
+analyze table emp_n2 compute statistics;
+analyze table dept_n1 compute statistics;
 analyze table loc compute statistics;
-analyze table emp compute statistics for columns lastname,deptid,locid;
-analyze table dept compute statistics for columns deptname,deptid;
+analyze table emp_n2 compute statistics for columns lastname,deptid,locid;
+analyze table dept_n1 compute statistics for columns deptname,deptid;
 analyze table loc compute statistics for columns state,locid,zip,year;
 
 -- number of rows
--- emp  - 48
--- dept - 6
+-- emp_n2  - 48
+-- dept_n1 - 6
 -- loc  - 8
 
 -- count distincts for relevant columns (since count distinct values are approximate, in some cases the count distinct value will be greater than the number of rows)
--- emp.deptid - 3
--- emp.lastname - 6
--- emp.locid - 7
--- dept.deptid - 7
--- dept.deptname - 6
+-- emp_n2.deptid - 3
+-- emp_n2.lastname - 6
+-- emp_n2.locid - 7
+-- dept_n1.deptid - 7
+-- dept_n1.deptname - 6
 -- loc.locid - 7
 -- loc.state - 6
 
 -- 2 relations, 1 attribute
 -- Expected output rows: (48*6)/max(3,7) = 41
-explain select * from emp e join dept d on (e.deptid = d.deptid);
+explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid);
 
 -- 2 relations, 2 attributes
 -- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6
-explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname;
-explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname);
+explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname;
+explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname);
 
 -- 2 relations, 3 attributes
 -- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1
-explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname;
+explain select * from emp_n2,dept_n1 where emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname;
 
 -- 3 relations, 1 attribute
 -- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658
-explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid);
+explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid) join emp_n2 e1 on (e.deptid = e1.deptid);
 
 -- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47
-explain select * from emp e join dept d  on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid);
+explain select * from emp_n2 e join dept_n1 d  on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid);
 
 -- 3 relations and 2 attribute
 -- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1
-explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state);
+explain select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state);
 
 -- left outer join
-explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname;
+explain select * from emp_n2 left outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname;
 
 -- left semi join
-explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname;
+explain select * from emp_n2 left semi join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname;
 
 -- right outer join
-explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname;
+explain select * from emp_n2 right outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname;
 
 -- full outer join
-explain select * from emp full outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname;
+explain select * from emp_n2 full outer join dept_n1 on emp_n2.deptid = dept_n1.deptid and emp_n2.lastname = dept_n1.deptname and dept_n1.deptname = emp_n2.lastname;
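For tracing the expected-row comments above, a worked instance of the ndv-based join estimate they encode (a sketch using only the counts listed in this script): for

  select * from emp_n2 e join dept_n1 d on (e.deptid = d.deptid)

the estimate is

  rows = (|emp_n2| * |dept_n1|) / max(ndv(e.deptid), ndv(d.deptid))
       = (48 * 6) / max(3, 7) = 288 / 7 = 41.

Adding the second equi-predicate e.lastname = d.deptname divides by max(6, 6) as well, giving 288 / (7 * 6) = 6; the three-relation variants divide the product of the row counts by the product of the two largest ndvs per join attribute (top2largest in the comments).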