Posted to commits@hive.apache.org by jc...@apache.org on 2018/05/21 20:24:55 UTC

[18/51] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q b/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
index f6b067b..52d9a0a 100644
--- a/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
+++ b/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
@@ -2,205 +2,205 @@
 SET hive.vectorized.execution.enabled=true;
 SET hive.fetch.task.conversion=none;
 
-create table testbasicint (uint_32_col int) stored as parquet;
-load data local inpath '../../data/files/test_uint.parquet' into table testbasicint;
-select * from testbasicint;
-drop table testbasicint;
+create table testbasicint_n0 (uint_32_col int) stored as parquet;
+load data local inpath '../../data/files/test_uint.parquet' into table testbasicint_n0;
+select * from testbasicint_n0;
+drop table testbasicint_n0;
 
-create table testbigintinv
+create table testbigintinv_n0
 (col_INT32_UINT_8 bigint,
  col_INT32_UINT_16 bigint,
  col_INT32_UINT_32 bigint,
  col_INT64_UINT_64 bigint) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv;
-select * from testbigintinv;
-drop table testbigintinv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testbigintinv_n0;
+select * from testbigintinv_n0;
+drop table testbigintinv_n0;
 
-create table testintinv
+create table testintinv_n0
 (col_INT32_UINT_8  int,
  col_INT32_UINT_16 int,
  col_INT32_UINT_32 int,
  col_INT64_UINT_64 int) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv;
-select * from testintinv;
-drop table testintinv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testintinv_n0;
+select * from testintinv_n0;
+drop table testintinv_n0;
 
-create table testsmallintinv
+create table testsmallintinv_n0
 (col_INT32_UINT_8  smallint,
  col_INT32_UINT_16 smallint,
  col_INT32_UINT_32 smallint,
  col_INT64_UINT_64 smallint) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testsmallintinv;
-select * from testsmallintinv;
-drop table testsmallintinv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testsmallintinv_n0;
+select * from testsmallintinv_n0;
+drop table testsmallintinv_n0;
 
-create table testtinyintinv
+create table testtinyintinv_n0
 (col_INT32_UINT_8  tinyint,
  col_INT32_UINT_16 tinyint,
  col_INT32_UINT_32 tinyint,
  col_INT64_UINT_64 tinyint) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv;
-select * from testtinyintinv;
-drop table testtinyintinv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testtinyintinv_n0;
+select * from testtinyintinv_n0;
+drop table testtinyintinv_n0;
 
-create table testfloatinv
+create table testfloatinv_n0
 (col_INT32_UINT_8  float,
  col_INT32_UINT_16 float,
  col_INT32_UINT_32 float,
  col_INT64_UINT_64 float) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv;
-select * from testfloatinv;
-drop table testfloatinv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testfloatinv_n0;
+select * from testfloatinv_n0;
+drop table testfloatinv_n0;
 
-create table testdoubleinv
+create table testdoubleinv_n0
 (col_INT32_UINT_8  double,
  col_INT32_UINT_16 double,
  col_INT32_UINT_32 double,
  col_INT64_UINT_64 double) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv;
-select * from testdoubleinv;
-drop table testdoubleinv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdoubleinv_n0;
+select * from testdoubleinv_n0;
+drop table testdoubleinv_n0;
 
-create table testdecimal22_2inv
+create table testdecimal22_2inv_n0
 (col_INT32_UINT_8  decimal(22,2),
  col_INT32_UINT_16 decimal(22,2),
  col_INT32_UINT_32 decimal(22,2),
  col_INT64_UINT_64 decimal(22,2)) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv;
-select * from testdecimal22_2inv;
-drop table testdecimal22_2inv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal22_2inv_n0;
+select * from testdecimal22_2inv_n0;
+drop table testdecimal22_2inv_n0;
 
-create table testdecimal13_2inv
+create table testdecimal13_2inv_n0
 (col_INT32_UINT_8  decimal(13,2),
  col_INT32_UINT_16 decimal(13,2),
  col_INT32_UINT_32 decimal(13,2),
  col_INT64_UINT_64 decimal(13,2)) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv;
-select * from testdecimal13_2inv;
-drop table testdecimal13_2inv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal13_2inv_n0;
+select * from testdecimal13_2inv_n0;
+drop table testdecimal13_2inv_n0;
 
-create table testdecimal8_2inv
+create table testdecimal8_2inv_n0
 (col_INT32_UINT_8  decimal(8,2),
  col_INT32_UINT_16 decimal(8,2),
  col_INT32_UINT_32 decimal(8,2),
  col_INT64_UINT_64 decimal(8,2)) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv;
-select * from testdecimal8_2inv;
-drop table testdecimal8_2inv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal8_2inv_n0;
+select * from testdecimal8_2inv_n0;
+drop table testdecimal8_2inv_n0;
 
-create table testdecimal6_2inv
+create table testdecimal6_2inv_n0
 (col_INT32_UINT_8  decimal(6,2),
  col_INT32_UINT_16 decimal(6,2),
  col_INT32_UINT_32 decimal(6,2),
  col_INT64_UINT_64 decimal(6,2)) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv;
-select * from testdecimal6_2inv;
-drop table testdecimal6_2inv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal6_2inv_n0;
+select * from testdecimal6_2inv_n0;
+drop table testdecimal6_2inv_n0;
 
-create table testdecimal3_2inv
+create table testdecimal3_2inv_n0
 (col_INT32_UINT_8  decimal(3,2),
  col_INT32_UINT_16 decimal(3,2),
  col_INT32_UINT_32 decimal(3,2),
  col_INT64_UINT_64 decimal(3,2)) stored as parquet;
-load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv;
-select * from testdecimal3_2inv;
-drop table testdecimal3_2inv;
+load data local inpath '../../data/files/data_including_invalid_values.parquet' into table testdecimal3_2inv_n0;
+select * from testdecimal3_2inv_n0;
+drop table testdecimal3_2inv_n0;
 
-create table testbigintvalid
+create table testbigintvalid_n0
 (col_INT32_UINT_8 bigint,
  col_INT32_UINT_16 bigint,
  col_INT32_UINT_32 bigint,
  col_INT64_UINT_64 bigint) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testbigintvalid;
-select * from testbigintvalid;
-drop table testbigintvalid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testbigintvalid_n0;
+select * from testbigintvalid_n0;
+drop table testbigintvalid_n0;
 
-create table testintvalid
+create table testintvalid_n0
 (col_INT32_UINT_8  int,
  col_INT32_UINT_16 int,
  col_INT32_UINT_32 int,
  col_INT64_UINT_64 int) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid;
-select * from testintvalid;
-drop table testintvalid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testintvalid_n0;
+select * from testintvalid_n0;
+drop table testintvalid_n0;
 
-create table testsmallintvalid
+create table testsmallintvalid_n0
 (col_INT32_UINT_8  smallint,
  col_INT32_UINT_16 smallint,
  col_INT32_UINT_32 smallint,
  col_INT64_UINT_64 smallint) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid;
-select * from testsmallintvalid;
-drop table testsmallintvalid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testsmallintvalid_n0;
+select * from testsmallintvalid_n0;
+drop table testsmallintvalid_n0;
 
-create table testtinyintvalid
+create table testtinyintvalid_n0
 (col_INT32_UINT_8  tinyint,
  col_INT32_UINT_16 tinyint,
  col_INT32_UINT_32 tinyint,
  col_INT64_UINT_64 tinyint) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid;
-select * from testtinyintvalid;
-drop table testtinyintvalid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testtinyintvalid_n0;
+select * from testtinyintvalid_n0;
+drop table testtinyintvalid_n0;
 
-create table testfloatvalid
+create table testfloatvalid_n0
 (col_INT32_UINT_8  float,
  col_INT32_UINT_16 float,
  col_INT32_UINT_32 float,
  col_INT64_UINT_64 float) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testfloatvalid;
-select * from testfloatvalid;
-drop table testfloatvalid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testfloatvalid_n0;
+select * from testfloatvalid_n0;
+drop table testfloatvalid_n0;
 
-create table testdoublevalid
+create table testdoublevalid_n0
 (col_INT32_UINT_8  double,
  col_INT32_UINT_16 double,
  col_INT32_UINT_32 double,
  col_INT64_UINT_64 double) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid;
-select * from testdoublevalid;
-drop table testdoublevalid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdoublevalid_n0;
+select * from testdoublevalid_n0;
+drop table testdoublevalid_n0;
 
-create table testdecimal22_2valid
+create table testdecimal22_2valid_n0
 (col_INT32_UINT_8  decimal(22,2),
  col_INT32_UINT_16 decimal(22,2),
  col_INT32_UINT_32 decimal(22,2),
  col_INT64_UINT_64 decimal(22,2)) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid;
-select * from testdecimal22_2valid;
-drop table testdecimal22_2valid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal22_2valid_n0;
+select * from testdecimal22_2valid_n0;
+drop table testdecimal22_2valid_n0;
 
-create table testdecimal13_2valid
+create table testdecimal13_2valid_n0
 (col_INT32_UINT_8  decimal(13,2),
  col_INT32_UINT_16 decimal(13,2),
  col_INT32_UINT_32 decimal(13,2),
  col_INT64_UINT_64 decimal(13,2)) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid;
-select * from testdecimal13_2valid;
-drop table testdecimal13_2valid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal13_2valid_n0;
+select * from testdecimal13_2valid_n0;
+drop table testdecimal13_2valid_n0;
 
-create table testdecimal8_2valid
+create table testdecimal8_2valid_n0
 (col_INT32_UINT_8  decimal(8,2),
  col_INT32_UINT_16 decimal(8,2),
  col_INT32_UINT_32 decimal(8,2),
  col_INT64_UINT_64 decimal(8,2)) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid;
-select * from testdecimal8_2valid;
-drop table testdecimal8_2valid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal8_2valid_n0;
+select * from testdecimal8_2valid_n0;
+drop table testdecimal8_2valid_n0;
 
-create table testdecimal6_2valid
+create table testdecimal6_2valid_n0
 (col_INT32_UINT_8  decimal(6,2),
  col_INT32_UINT_16 decimal(6,2),
  col_INT32_UINT_32 decimal(6,2),
  col_INT64_UINT_64 decimal(6,2)) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid;
-select * from testdecimal6_2valid;
-drop table testdecimal6_2valid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal6_2valid_n0;
+select * from testdecimal6_2valid_n0;
+drop table testdecimal6_2valid_n0;
 
-create table testdecimal3_2valid
+create table testdecimal3_2valid_n0
 (col_INT32_UINT_8  decimal(3,2),
  col_INT32_UINT_16 decimal(3,2),
  col_INT32_UINT_32 decimal(3,2),
  col_INT64_UINT_64 decimal(3,2)) stored as parquet;
-load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid;
-select * from testdecimal3_2valid;
-drop table testdecimal3_2valid;
+load data local inpath '../../data/files/data_with_valid_values.parquet' into table testdecimal3_2valid_n0;
+select * from testdecimal3_2valid_n0;
+drop table testdecimal3_2valid_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/recursive_dir.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/recursive_dir.q b/ql/src/test/queries/clientpositive/recursive_dir.q
index eea365d..2b25f60 100644
--- a/ql/src/test/queries/clientpositive/recursive_dir.q
+++ b/ql/src/test/queries/clientpositive/recursive_dir.q
@@ -1,20 +1,20 @@
 --! qt:dataset:src
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
-CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING);
-CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING)
+CREATE TABLE fact_daily_n1(x int) PARTITIONED BY (ds STRING);
+CREATE TABLE fact_tz_n0(x int) PARTITIONED BY (ds STRING, hr STRING)
 LOCATION 'pfile:${system:test.tmp.dir}/fact_tz';
 
-INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1')
+INSERT OVERWRITE TABLE fact_tz_n0 PARTITION (ds='1', hr='1')
 SELECT key+11 FROM src WHERE key=484;
 
-ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE');
-ALTER TABLE fact_daily ADD PARTITION (ds='1')
+ALTER TABLE fact_daily_n1 SET TBLPROPERTIES('EXTERNAL'='TRUE');
+ALTER TABLE fact_daily_n1 ADD PARTITION (ds='1')
 LOCATION 'pfile:${system:test.tmp.dir}/fact_tz/ds=1';
 
 set mapred.input.dir.recursive=true;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
-SELECT * FROM fact_daily WHERE ds='1';
+SELECT * FROM fact_daily_n1 WHERE ds='1';
 
-SELECT count(1) FROM fact_daily WHERE ds='1';
+SELECT count(1) FROM fact_daily_n1 WHERE ds='1';
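A note on the pattern this test exercises, since the renames obscure it slightly: fact_daily_n1's partition is pointed at the parent directory that fact_tz_n0 writes into, and the two set commands let Hive recurse into the hr=1 subdirectory when scanning ds=1. A minimal standalone sketch of the same idea (the names and the /tmp path are illustrative assumptions, not part of the commit):

-- Sketch: read files in nested subdirectories through an external partition.
set mapred.input.dir.recursive=true;
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE EXTERNAL TABLE fact_daily_sketch(x int) PARTITIONED BY (ds STRING);
-- Point the partition at a parent dir whose data sits in deeper subdirs,
-- e.g. /tmp/fact_tz/ds=1/hr=1/000000_0 written by another table.
ALTER TABLE fact_daily_sketch ADD PARTITION (ds='1') LOCATION '/tmp/fact_tz/ds=1';
SELECT count(1) FROM fact_daily_sketch WHERE ds='1';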

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q b/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q
index e28ed9f..d6c4a89 100644
--- a/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q
+++ b/ql/src/test/queries/clientpositive/reduce_deduplicate_exclude_gby.q
@@ -1,8 +1,8 @@
-create table t1( key_int1 int, key_int2 int, key_string1 string, key_string2 string);
+create table t1_n32( key_int1_n32 int, key_int2 int, key_string1 string, key_string2 string);
 
 set hive.optimize.reducededuplication=false;
 
 set hive.map.aggr=false;
-select Q1.key_int1, sum(Q1.key_int1) from (select * from t1 cluster by key_int1) Q1 group by Q1.key_int1;
+select Q1.key_int1_n32, sum(Q1.key_int1_n32) from (select * from t1_n32 cluster by key_int1_n32) Q1 group by Q1.key_int1_n32;
 
-drop table t1;
+drop table t1_n32;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/remove_exprs_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/remove_exprs_stats.q b/ql/src/test/queries/clientpositive/remove_exprs_stats.q
index 741d455..3d73fc3 100644
--- a/ql/src/test/queries/clientpositive/remove_exprs_stats.q
+++ b/ql/src/test/queries/clientpositive/remove_exprs_stats.q
@@ -2,74 +2,74 @@ set hive.optimize.filter.stats.reduction=true;
 set hive.mapred.mode=nonstrict;
 set hive.stats.fetch.column.stats=true;
 
-create table if not exists loc_staging (
+create table if not exists loc_staging_n0 (
   state string,
   locid int,
   zip bigint,
   year int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table loc_orc like loc_staging;
-alter table loc_orc set fileformat orc;
+create table loc_orc_n0 like loc_staging_n0;
+alter table loc_orc_n0 set fileformat orc;
 
-load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging_n0;
 
-insert overwrite table loc_orc select * from loc_staging;
+insert overwrite table loc_orc_n0 select * from loc_staging_n0;
 
-analyze table loc_orc compute statistics for columns state,locid,zip,year;
+analyze table loc_orc_n0 compute statistics for columns state,locid,zip,year;
 
 -- always true
-explain select * from loc_orc where locid < 30;
+explain select * from loc_orc_n0 where locid < 30;
 -- always false
-explain select * from loc_orc where locid > 30;
+explain select * from loc_orc_n0 where locid > 30;
 -- always true
-explain select * from loc_orc where locid <= 30;
+explain select * from loc_orc_n0 where locid <= 30;
 -- always false
-explain select * from loc_orc where locid >= 30;
+explain select * from loc_orc_n0 where locid >= 30;
 
 -- nothing to do
-explain select * from loc_orc where locid < 6;
+explain select * from loc_orc_n0 where locid < 6;
 -- always false
-explain select * from loc_orc where locid > 6;
+explain select * from loc_orc_n0 where locid > 6;
 -- always true
-explain select * from loc_orc where locid <= 6;
+explain select * from loc_orc_n0 where locid <= 6;
 -- nothing to do
-explain select * from loc_orc where locid >= 6;
+explain select * from loc_orc_n0 where locid >= 6;
 
 -- always false
-explain select * from loc_orc where locid < 1;
+explain select * from loc_orc_n0 where locid < 1;
 -- nothing to do
-explain select * from loc_orc where locid > 1;
+explain select * from loc_orc_n0 where locid > 1;
 -- nothing to do
-explain select * from loc_orc where locid <= 1;
+explain select * from loc_orc_n0 where locid <= 1;
 -- always true
-explain select * from loc_orc where locid >= 1;
+explain select * from loc_orc_n0 where locid >= 1;
 
 -- 5 should stay
-explain select * from loc_orc where locid IN (-4,5,30,40);
+explain select * from loc_orc_n0 where locid IN (-4,5,30,40);
 -- nothing to do
-explain select * from loc_orc where locid IN (5,2,3);
+explain select * from loc_orc_n0 where locid IN (5,2,3);
 -- 1 and 6 should be left
-explain select * from loc_orc where locid IN (1,6,9);
+explain select * from loc_orc_n0 where locid IN (1,6,9);
 -- always false
-explain select * from loc_orc where locid IN (40,30);
+explain select * from loc_orc_n0 where locid IN (40,30);
 
 
 
-create table t ( s string);
-insert into t values (null),(null);
-analyze table t compute statistics for columns s;
+create table t_n7 ( s string);
+insert into t_n7 values (null),(null);
+analyze table t_n7 compute statistics for columns s;
 
 -- true
-explain select * from t where s is null;
-explain select * from loc_orc where locid is not null;
+explain select * from t_n7 where s is null;
+explain select * from loc_orc_n0 where locid is not null;
 -- false
-explain select * from t where s is not null;
-explain select * from loc_orc where locid is null;
+explain select * from t_n7 where s is not null;
+explain select * from loc_orc_n0 where locid is null;
 
-insert into t values ('val1');
-analyze table t compute statistics for columns s;
+insert into t_n7 values ('val1');
+analyze table t_n7 compute statistics for columns s;
 
 -- untouched
-explain select * from t where s is not null;
-explain select * from t where s is null;
+explain select * from t_n7 where s is not null;
+explain select * from t_n7 where s is null;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/rename_partition_location.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/rename_partition_location.q b/ql/src/test/queries/clientpositive/rename_partition_location.q
index 09114d6..fb93be1 100644
--- a/ql/src/test/queries/clientpositive/rename_partition_location.q
+++ b/ql/src/test/queries/clientpositive/rename_partition_location.q
@@ -3,19 +3,19 @@
 -- This test verifies that if the table's location changes, renaming a partition will not change
 -- the partition location accordingly
 
-CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING)
+CREATE TABLE rename_partition_table_n0 (key STRING, value STRING) PARTITIONED BY (part STRING)
 STORED AS RCFILE
 LOCATION 'pfile:${system:test.tmp.dir}/rename_partition_table';
 
-INSERT OVERWRITE TABLE rename_partition_table PARTITION (part = '1') SELECT * FROM src;
+INSERT OVERWRITE TABLE rename_partition_table_n0 PARTITION (part = '1') SELECT * FROM src;
 
-ALTER TABLE rename_partition_table SET LOCATION 'file:${system:test.tmp.dir}/rename_partition_table';
+ALTER TABLE rename_partition_table_n0 SET LOCATION 'file:${system:test.tmp.dir}/rename_partition_table';
 
-ALTER TABLE rename_partition_table PARTITION (part = '1') RENAME TO PARTITION (part = '2');
+ALTER TABLE rename_partition_table_n0 PARTITION (part = '1') RENAME TO PARTITION (part = '2');
 
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyPartitionIsNotSubdirectoryOfTableHook;
 
-SELECT count(*) FROM rename_partition_table where part = '2';
+SELECT count(*) FROM rename_partition_table_n0 where part = '2';
 
 SET hive.exec.post.hooks=;
 
@@ -32,5 +32,5 @@ SELECT count(*) FROM rename_partition_table_2 where part = '2';
 
 SET hive.exec.post.hooks=;
 
-DROP TABLE rename_partition_table;
+DROP TABLE rename_partition_table_n0;
 DROP TABLE rename_partition_table_2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/repair.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/repair.q b/ql/src/test/queries/clientpositive/repair.q
index 8076030..d48417f 100644
--- a/ql/src/test/queries/clientpositive/repair.q
+++ b/ql/src/test/queries/clientpositive/repair.q
@@ -1,17 +1,17 @@
-DROP TABLE IF EXISTS repairtable;
+DROP TABLE IF EXISTS repairtable_n4;
 
-CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
+CREATE TABLE repairtable_n4(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
 
-MSCK TABLE repairtable;
+MSCK TABLE repairtable_n4;
 
 dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=a/p2=a;
 dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=b/p2=a;
 dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=b/p2=a/datafile;
 
-MSCK TABLE default.repairtable;
+MSCK TABLE default.repairtable_n4;
 
-MSCK REPAIR TABLE default.repairtable;
+MSCK REPAIR TABLE default.repairtable_n4;
 
-MSCK TABLE repairtable;
+MSCK TABLE repairtable_n4;
 
-DROP TABLE default.repairtable;
+DROP TABLE default.repairtable_n4;
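For readers unfamiliar with MSCK, which is what this test drives: partition directories created directly on the filesystem are invisible to the metastore until MSCK REPAIR TABLE registers them, while plain MSCK only reports the discrepancy. A minimal sketch under that assumption (the table name and warehouse path here are hypothetical, not from the commit):

-- Sketch: register filesystem-created partitions with the metastore.
CREATE TABLE repairtable_sketch(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
dfs -mkdir -p /user/hive/warehouse/repairtable_sketch/p1=a/p2=a;
MSCK TABLE repairtable_sketch;         -- reports p1=a/p2=a as missing from the metastore
MSCK REPAIR TABLE repairtable_sketch;  -- actually adds the partition
SHOW PARTITIONS repairtable_sketch;    -- now lists p1=a/p2=a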

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/results_cache_invalidation.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/results_cache_invalidation.q b/ql/src/test/queries/clientpositive/results_cache_invalidation.q
index 0ef5c66..ecb5011 100644
--- a/ql/src/test/queries/clientpositive/results_cache_invalidation.q
+++ b/ql/src/test/queries/clientpositive/results_cache_invalidation.q
@@ -3,11 +3,11 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table tab1 (key string, value string) stored as orc tblproperties ('transactional'='true');
-create table tab2 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab1_n6 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab2_n5 (key string, value string) stored as orc tblproperties ('transactional'='true');
 
-insert into tab1 select * from default.src;
-insert into tab2 select * from default.src;
+insert into tab1_n6 select * from default.src;
+insert into tab2_n5 select * from default.src;
 
 set hive.query.results.cache.enabled=true;
 
@@ -16,75 +16,75 @@ set test.comment;
 
 -- Q1
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
 
 -- Q2
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 
 -- Q3
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
 
 set test.comment="Q1 should now be able to use cache";
 set test.comment;
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
 
 set test.comment="Q2 should now be able to use cache";
 set test.comment;
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 
 set test.comment="Q3 should now be able to use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
 
--- Update tab1 which should invalidate Q1 and Q3.
-insert into tab1 values ('88', 'val_88');
+-- Update tab1_n6 which should invalidate Q1 and Q3.
+insert into tab1_n6 values ('88', 'val_88');
 
 set test.comment="Q1 should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
 
-set test.comment="Q2 should still use cache since tab2 not updated";
+set test.comment="Q2 should still use cache since tab2_n5 not updated";
 set test.comment;
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 
 set test.comment="Q3 should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
 
--- Update tab2 which should invalidate Q2 and Q3.
-insert into tab2 values ('88', 'val_88');
+-- Update tab2_n5 which should invalidate Q2 and Q3.
+insert into tab2_n5 values ('88', 'val_88');
 
 set test.comment="Q1 should use cache";
 set test.comment;
 explain
-select count(*) from tab1 a where key >= 0;
-select count(*) from tab1 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
+select count(*) from tab1_n6 a where key >= 0;
 
 set test.comment="Q2 should not use cache";
 set test.comment;
 explain
-select max(key) from tab2;
-select max(key) from tab2;
+select max(key) from tab2_n5;
+select max(key) from tab2_n5;
 
 set test.comment="Q3 should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
+select count(*) from tab1_n6 join tab2_n5 on (tab1_n6.key = tab2_n5.key);
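The mechanics behind the expected hits and misses above, briefly: with hive.query.results.cache.enabled=true, HiveServer2 keys cached results on the query plus the state of every source table, and a write to an ACID table advances its write id, invalidating any cache entry that read from it. A compact sketch of the knobs involved (table name hypothetical):

-- Sketch: results cache over a transactional table.
set hive.support.concurrency=true;
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
set hive.query.results.cache.enabled=true;
select max(key) from acid_sketch;   -- first run computes and caches
select max(key) from acid_sketch;   -- second run can be answered from the cache
insert into acid_sketch values ('88', 'val_88');  -- write invalidates the entry
select max(key) from acid_sketch;   -- recomputed, then re-cached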

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/results_cache_transactional.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/results_cache_transactional.q b/ql/src/test/queries/clientpositive/results_cache_transactional.q
index 13fb848..89a6a55 100644
--- a/ql/src/test/queries/clientpositive/results_cache_transactional.q
+++ b/ql/src/test/queries/clientpositive/results_cache_transactional.q
@@ -3,34 +3,34 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table tab1 (key string, value string) stored as orc tblproperties ('transactional'='true');
-create table tab2 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab1_n1 (key string, value string) stored as orc tblproperties ('transactional'='true');
+create table tab2_n1 (key string, value string) stored as orc tblproperties ('transactional'='true');
 
-insert into tab1 select * from default.src;
-insert into tab2 select * from default.src;
+insert into tab1_n1 select * from default.src;
+insert into tab2_n1 select * from default.src;
 
 set hive.query.results.cache.enabled=true;
 set hive.query.results.cache.nontransactional.tables.enabled=false;
 
 explain
-select max(key) from tab1;
-select max(key) from tab1;
+select max(key) from tab1_n1;
+select max(key) from tab1_n1;
 
 set test.comment="Query on transactional table should use cache";
 set test.comment;
 explain
-select max(key) from tab1;
-select max(key) from tab1;
+select max(key) from tab1_n1;
+select max(key) from tab1_n1;
 
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
 
 set test.comment="Join on transactional tables, should use cache";
 set test.comment;
 explain
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
-select count(*) from tab1 join tab2 on (tab1.key = tab2.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
+select count(*) from tab1_n1 join tab2_n1 on (tab1_n1.key = tab2_n1.key);
 
 
 -- Non-transactional tables
@@ -46,12 +46,12 @@ select max(key) from src;
 select max(key) from src;
 
 explain
-select count(*) from tab1 join src on (tab1.key = src.key);
-select count(*) from tab1 join src on (tab1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
 
 set test.comment="Join uses non-transactional table, should not use cache";
 set test.comment;
 explain
-select count(*) from tab1 join src on (tab1.key = src.key);
-select count(*) from tab1 join src on (tab1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
+select count(*) from tab1_n1 join src on (tab1_n1.key = src.key);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/results_cache_with_masking.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/results_cache_with_masking.q b/ql/src/test/queries/clientpositive/results_cache_with_masking.q
index db27525..d078092 100644
--- a/ql/src/test/queries/clientpositive/results_cache_with_masking.q
+++ b/ql/src/test/queries/clientpositive/results_cache_with_masking.q
@@ -6,14 +6,14 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho
 set hive.query.results.cache.enabled=true;
 set hive.query.results.cache.nontransactional.tables.enabled=true;
 
-create table masking_test as select cast(key as int) as key, value from src;
+create table masking_test_n7 as select cast(key as int) as key, value from src;
 
 explain
-select key, count(*) from masking_test group by key;
-select key, count(*) from masking_test group by key;
+select key, count(*) from masking_test_n7 group by key;
+select key, count(*) from masking_test_n7 group by key;
 
 -- This time we should use the cache
 explain
-select key, count(*) from masking_test group by key;
-select key, count(*) from masking_test group by key;
+select key, count(*) from masking_test_n7 group by key;
+select key, count(*) from masking_test_n7 group by key;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/retry_failure.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/retry_failure.q b/ql/src/test/queries/clientpositive/retry_failure.q
index 0d5ce4f..ad12ecd 100644
--- a/ql/src/test/queries/clientpositive/retry_failure.q
+++ b/ql/src/test/queries/clientpositive/retry_failure.q
@@ -1,8 +1,8 @@
 --! qt:dataset:src
 
 SET hive.vectorized.execution.enabled=false;
-create table tx(a int,f string);
-insert into tx values (1,'non_existent_file');
+create table tx_n1(a int,f string);
+insert into tx_n1 values (1,'non_existent_file');
 
 set zzz=1;
 set reexec.overlay.zzz=2;
@@ -10,4 +10,4 @@ set reexec.overlay.zzz=2;
 set hive.query.reexecution.enabled=true;
 set hive.query.reexecution.strategies=overlay;
 
-select assert_true(${hiveconf:zzz} > a) from tx group by a;
+select assert_true(${hiveconf:zzz} > a) from tx_n1 group by a;
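What the zzz indirection is doing: with the overlay re-execution strategy, a failed query is retried once with every reexec.overlay.* setting applied on top of the session config. The first attempt runs with zzz=1, so assert_true(1 > a) throws on the row a=1; the retry runs with zzz=2 and passes. A trimmed sketch of that control flow (values as in the test):

-- Sketch: overlay re-execution flips a hiveconf value on retry.
set hive.query.reexecution.enabled=true;
set hive.query.reexecution.strategies=overlay;
set zzz=1;                   -- first attempt: assert_true(1 > 1) fails
set reexec.overlay.zzz=2;    -- retry sees zzz=2: assert_true(2 > 1) passes
select assert_true(${hiveconf:zzz} > a) from tx_n1 group by a;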

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q b/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q
index e1c70ed..08fd4a2 100644
--- a/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q
+++ b/ql/src/test/queries/clientpositive/retry_failure_stat_changes.q
@@ -1,7 +1,7 @@
 SET hive.vectorized.execution.enabled=false;
 
-create table tx(a int,u int);
-insert into tx values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
+create table tx_n2(a int,u int);
+insert into tx_n2 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
 
 create table px(a int,p int);
 insert into px values (2,2),(3,3),(5,5),(7,7),(11,11);
@@ -12,19 +12,19 @@ set hive.query.reexecution.enabled=true;
 set hive.query.reexecution.strategies=overlay,reoptimize;
 
 explain REOPTIMIZATION 
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n2 join px on (u=p) where u<10 and p>2;
 
 set hive.auto.convert.join=false;
 explain analyze
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n2 join px on (u=p) where u<10 and p>2;
 set hive.auto.convert.join=true;
 explain analyze
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n2 join px on (u=p) where u<10 and p>2;
 
 set zzz=1;
 set reexec.overlay.zzz=2000;
 
 explain
-select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx join px on (tx.a=px.a) where u<10 and p>2;
-select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx join px on (tx.a=px.a) where u<10 and p>2;
+select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx_n2 join px on (tx_n2.a=px.a) where u<10 and p>2;
+select assert_true_oom(${hiveconf:zzz} > sum(u*p)) from tx_n2 join px on (tx_n2.a=px.a) where u<10 and p>2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/root_dir_external_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/root_dir_external_table.q b/ql/src/test/queries/clientpositive/root_dir_external_table.q
index a08d640..7763ce2 100644
--- a/ql/src/test/queries/clientpositive/root_dir_external_table.q
+++ b/ql/src/test/queries/clientpositive/root_dir_external_table.q
@@ -7,7 +7,7 @@ insert overwrite directory "hdfs:///tmp/test_root_dir_external_table" select key
 dfs -cp /tmp/test_root_dir_external_table/000000_0 /000000_0;
 dfs -rmr hdfs:///tmp/test_root_dir_external_table;
 
-create external table roottable (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs:///';
-select count(*) from roottable;
+create external table roottable_n0 (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs:///';
+select count(*) from roottable_n0;
 
 dfs -rmr /000000_0;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q b/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q
index 7ebe127..ac93eed 100644
--- a/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q
+++ b/ql/src/test/queries/clientpositive/runtime_skewjoin_mapjoin_spark.q
@@ -10,19 +10,19 @@ set hive.auto.convert.join.noconditionaltask.size=50;
 
 -- This is mainly intended for Spark, to test runtime skew join together with map join
 
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n94(key STRING, val STRING) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n94;
 
 EXPLAIN
 SELECT COUNT(*) FROM
   (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
 JOIN
-  (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+  (SELECT src.key,src.value FROM src JOIN T1_n94 ON src.key=T1_n94.key) b
 ON a.key=b.key;
 
 SELECT COUNT(*) FROM
   (SELECT src1.key,src1.value FROM src src1 JOIN src src2 ON src1.key=src2.key) a
 JOIN
-  (SELECT src.key,src.value FROM src JOIN T1 ON src.key=T1.key) b
+  (SELECT src.key,src.value FROM src JOIN T1_n94 ON src.key=T1_n94.key) b
 ON a.key=b.key;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/runtime_stats_hs2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/runtime_stats_hs2.q b/ql/src/test/queries/clientpositive/runtime_stats_hs2.q
index 34a8dd3..1a02eac 100644
--- a/ql/src/test/queries/clientpositive/runtime_stats_hs2.q
+++ b/ql/src/test/queries/clientpositive/runtime_stats_hs2.q
@@ -1,9 +1,9 @@
 
-create table tx(a int,u int);
-insert into tx values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
+create table tx_n3(a int,u int);
+insert into tx_n3 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(10,10);
 
-create table px(a int,p int);
-insert into px values (2,2),(3,3),(5,5),(7,7),(11,11);
+create table px_n0(a int,p int);
+insert into px_n0 values (2,2),(3,3),(5,5),(7,7),(11,11);
 
 set hive.explain.user=true;
 set hive.query.reexecution.enabled=true;
@@ -13,10 +13,10 @@ set hive.query.reexecution.stats.persist.scope=hiveserver;
 
 -- join output estimate is underestimated: 1 row
 explain
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n3 join px_n0 on (u=p) where u<10 and p>2;
 
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n3 join px_n0 on (u=p) where u<10 and p>2;
 
 -- join output estimate is 3 rows ; all the operators stats are "runtime"
 explain
-select sum(u*p) from tx join px on (u=p) where u<10 and p>2;
+select sum(u*p) from tx_n3 join px_n0 on (u=p) where u<10 and p>2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample1.q b/ql/src/test/queries/clientpositive/sample1.q
index 16c5bcd..c7dcbba 100644
--- a/ql/src/test/queries/clientpositive/sample1.q
+++ b/ql/src/test/queries/clientpositive/sample1.q
@@ -1,17 +1,17 @@
 --! qt:dataset:srcpart
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n89(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE;
 
 -- no input pruning, no sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n89 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11';
 
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n89 SELECT s.*
 FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
 WHERE s.ds='2008-04-08' and s.hr='11';
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n89.* FROM dest1_n89;
 
 select count(1) from srcbucket;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample2.q b/ql/src/test/queries/clientpositive/sample2.q
index b9edb7d..3cd5521 100644
--- a/ql/src/test/queries/clientpositive/sample2.q
+++ b/ql/src/test/queries/clientpositive/sample2.q
@@ -1,14 +1,14 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n29(key INT, value STRING) STORED AS TEXTFILE;
 
 -- input pruning, no sample filter
 -- default table sample columns
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.* 
+INSERT OVERWRITE TABLE dest1_n29 SELECT s.* 
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s;
 
-INSERT OVERWRITE TABLE dest1 SELECT s.* 
+INSERT OVERWRITE TABLE dest1_n29 SELECT s.* 
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s;
 
-SELECT dest1.* FROM dest1
+SELECT dest1_n29.* FROM dest1_n29
 order by key, value;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample4.q b/ql/src/test/queries/clientpositive/sample4.q
index 49d7418..756d05c 100644
--- a/ql/src/test/queries/clientpositive/sample4.q
+++ b/ql/src/test/queries/clientpositive/sample4.q
@@ -1,14 +1,14 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n118(key INT, value STRING) STORED AS TEXTFILE;
 
 -- bucket column is the same as table sample
 -- No need for sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n118 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s;
 
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n118 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s;
 
-SELECT dest1.* FROM dest1
+SELECT dest1_n118.* FROM dest1_n118
 order by key, value;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample5.q b/ql/src/test/queries/clientpositive/sample5.q
index 5cb6ffd..a659f2b 100644
--- a/ql/src/test/queries/clientpositive/sample5.q
+++ b/ql/src/test/queries/clientpositive/sample5.q
@@ -1,16 +1,16 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n69(key INT, value STRING) STORED AS TEXTFILE;
 
 -- SORT_QUERY_RESULTS
 
 -- no input pruning, sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.* 
+INSERT OVERWRITE TABLE dest1_n69 SELECT s.* 
 -- here's another test
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s;
 
-INSERT OVERWRITE TABLE dest1 SELECT s.* 
+INSERT OVERWRITE TABLE dest1_n69 SELECT s.* 
 -- here's another test
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s;
 
-SELECT dest1.* FROM dest1 SORT BY key, value;
+SELECT dest1_n69.* FROM dest1_n69 SORT BY key, value;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample6.q b/ql/src/test/queries/clientpositive/sample6.q
index 6972897..292338c 100644
--- a/ql/src/test/queries/clientpositive/sample6.q
+++ b/ql/src/test/queries/clientpositive/sample6.q
@@ -1,17 +1,17 @@
 --! qt:dataset:srcbucket2
 --! qt:dataset:srcbucket
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n27(key INT, value STRING) STORED AS TEXTFILE;
 
 -- both input pruning and sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n27 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s;
 
-INSERT OVERWRITE TABLE dest1 SELECT s.*
+INSERT OVERWRITE TABLE dest1_n27 SELECT s.*
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s;
 
-SELECT dest1.* FROM dest1
+SELECT dest1_n27.* FROM dest1_n27
 order by key, value;
 
 EXPLAIN EXTENDED SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 4 OUT OF 4 on key) s
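A note on why some of these sample tests expect "input pruning" and others a "sample filter": TABLESAMPLE(BUCKET x OUT OF y ON col) hashes col into y buckets and keeps bucket x. When the table is physically CLUSTERED BY the same column into a compatible bucket count, Hive can satisfy the sample by reading only the matching bucket files; otherwise (for example ON rand()) it must scan everything and filter rows. A hypothetical sketch:

-- Sketch: bucket sampling with and without input pruning.
CREATE TABLE bucketed_sketch(key INT, value STRING)
CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-- Matches the physical bucketing: only bucket file 1 of 4 is read.
SELECT * FROM bucketed_sketch TABLESAMPLE (BUCKET 1 OUT OF 4 ON key) s;
-- rand() cannot line up with any bucket file: full scan plus a row filter.
SELECT * FROM bucketed_sketch TABLESAMPLE (BUCKET 1 OUT OF 4 ON rand()) s;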

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample7.q b/ql/src/test/queries/clientpositive/sample7.q
index e8f5336..dbbd262 100644
--- a/ql/src/test/queries/clientpositive/sample7.q
+++ b/ql/src/test/queries/clientpositive/sample7.q
@@ -1,15 +1,15 @@
 --! qt:dataset:srcbucket
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n160(key INT, value STRING) STORED AS TEXTFILE;
 
 -- both input pruning and sample filter
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE dest1 SELECT s.* 
+INSERT OVERWRITE TABLE dest1_n160 SELECT s.* 
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s
 WHERE s.key > 100;
 
-INSERT OVERWRITE TABLE dest1 SELECT s.* 
+INSERT OVERWRITE TABLE dest1_n160 SELECT s.* 
 FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s
 WHERE s.key > 100;
 
-SELECT dest1.* FROM dest1
+SELECT dest1_n160.* FROM dest1_n160
 order by key, value;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
index 772b8a8..803ca91 100644
--- a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
+++ b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_hadoop20.q
@@ -18,25 +18,25 @@ set hive.merge.smallfiles.avgsize=1;
 -- fixed in MAPREDUCE-2046 which is included in 0.22.
 
 -- create file inputs
-create table sih_i_part (key int, value string) partitioned by (p string);
-insert overwrite table sih_i_part partition (p='1') select key, value from src;
-insert overwrite table sih_i_part partition (p='2') select key+10000, value from src;
-insert overwrite table sih_i_part partition (p='3') select key+20000, value from src;
-create table sih_src as select key, value from sih_i_part order by key, value;
-create table sih_src2 as select key, value from sih_src order by key, value;
+create table sih_i_part_n0 (key int, value string) partitioned by (p string);
+insert overwrite table sih_i_part_n0 partition (p='1') select key, value from src;
+insert overwrite table sih_i_part_n0 partition (p='2') select key+10000, value from src;
+insert overwrite table sih_i_part_n0 partition (p='3') select key+20000, value from src;
+create table sih_src_n0 as select key, value from sih_i_part_n0 order by key, value;
+create table sih_src2_n0 as select key, value from sih_src_n0 order by key, value;
 
 set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.VerifyIsLocalModeHook ;
 set mapred.job.tracker=localhost:58;
 set hive.exec.mode.local.auto.input.files.max=1;
 
 -- Sample split, running locally limited by num tasks
-select count(1) from sih_src tablesample(1 percent);
+select count(1) from sih_src_n0 tablesample(1 percent);
 
 -- sample two tables
-select count(1) from sih_src tablesample(1 percent)a join sih_src2 tablesample(1 percent)b on a.key = b.key;
+select count(1) from sih_src_n0 tablesample(1 percent)a join sih_src2_n0 tablesample(1 percent)b on a.key = b.key;
 
 set hive.exec.mode.local.auto.inputbytes.max=1000;
 set hive.exec.mode.local.auto.input.files.max=4;
 
 -- sample split, running locally limited by max bytes
-select count(1) from sih_src tablesample(1 percent);
+select count(1) from sih_src_n0 tablesample(1 percent);

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
index cb80ef4..1675263 100644
--- a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
+++ b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
@@ -14,12 +14,12 @@ set hive.compute.query.using.stats=true;
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 
 -- create file inputs
-create table sih_i_part (key int, value string) partitioned by (p string);
-insert overwrite table sih_i_part partition (p='1') select key, value from src;
-insert overwrite table sih_i_part partition (p='2') select key+10000, value from src;
-insert overwrite table sih_i_part partition (p='3') select key+20000, value from src;
-create table sih_src as select key, value from sih_i_part order by key, value;
-create table sih_src2 as select key, value from sih_src order by key, value;
+create table sih_i_part_n1 (key int, value string) partitioned by (p string);
+insert overwrite table sih_i_part_n1 partition (p='1') select key, value from src;
+insert overwrite table sih_i_part_n1 partition (p='2') select key+10000, value from src;
+insert overwrite table sih_i_part_n1 partition (p='3') select key+20000, value from src;
+create table sih_src_n1 as select key, value from sih_i_part_n1 order by key, value;
+create table sih_src2_n1 as select key, value from sih_src_n1 order by key, value;
 
 set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.VerifyIsLocalModeHook;
 set mapreduce.framework.name=yarn;
@@ -34,16 +34,16 @@ set hive.sample.seednumber=7;
 
 -- sample split, running locally limited by num tasks
 
-desc formatted sih_src;
+desc formatted sih_src_n1;
 
-explain select count(1) from sih_src;
+explain select count(1) from sih_src_n1;
 
-select count(1) from sih_src;
+select count(1) from sih_src_n1;
 
-explain select count(1) from sih_src tablesample(1 percent);
+explain select count(1) from sih_src_n1 tablesample(1 percent);
 
-select count(1) from sih_src tablesample(1 percent);
+select count(1) from sih_src_n1 tablesample(1 percent);
 
-explain select count(1) from sih_src tablesample(10 rows);
+explain select count(1) from sih_src_n1 tablesample(10 rows);
 
-select count(1) from sih_src tablesample(10 rows);
+select count(1) from sih_src_n1 tablesample(10 rows);

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
index df52696..7231123 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
@@ -24,9 +24,9 @@ set hive.llap.io.enabled=false;
 -- Instead just one explain vectorization only detail
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n29(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data;
+load data local inpath '../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into table schema_evolution_data_n29;
 
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -35,48 +35,48 @@ load data local inpath '../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_permute_select_n9(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_permute_select_n9 partition(part=1) VALUES (1, 1111, 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n9 add columns(c int);
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333);
+insert into table part_add_int_permute_select_n9 partition(part=1) VALUES (2, 2222, 'new', 3333);
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n9;
+select insert_num,part,a,b,c from part_add_int_permute_select_n9;
+select insert_num,part,c from part_add_int_permute_select_n9;
 
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n9;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_string_permute_select_n9(insert_num int, a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (1, 1111, 'new');
+insert into table part_add_int_string_permute_select_n9 partition(part=1) VALUES (1, 1111, 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n9 add columns(c int, d string);
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
+insert into table part_add_int_string_permute_select_n9 partition(part=1) VALUES (2, 2222, 'new', 3333, '4444');
 
 explain vectorization only detail
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n9;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_string_permute_select;
-select insert_num,part,a,b,c from part_add_int_string_permute_select;
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,c,d from part_add_int_string_permute_select;
-select insert_num,part,a,d from part_add_int_string_permute_select;
-select insert_num,part,c from part_add_int_string_permute_select;
-select insert_num,part,d from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n9;
+select insert_num,part,a,b,c from part_add_int_string_permute_select_n9;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n9;
+select insert_num,part,a,c,d from part_add_int_string_permute_select_n9;
+select insert_num,part,a,d from part_add_int_string_permute_select_n9;
+select insert_num,part,c from part_add_int_string_permute_select_n9;
+select insert_num,part,d from part_add_int_string_permute_select_n9;
 
-drop table part_add_int_string_permute_select;
+drop table part_add_int_string_permute_select_n9;
 
 
 
@@ -87,18 +87,18 @@ drop table part_add_int_string_permute_select;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> DOUBLE: (STRING, CHAR, VARCHAR)
 --
-CREATE TABLE part_change_string_group_double(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_change_string_group_double_n9(insert_num int, c1 STRING, c2 CHAR(50), c3 VARCHAR(50), b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data;
+insert into table part_change_string_group_double_n9 partition(part=1) SELECT insert_num, double_str, double_str, double_str, 'original' FROM schema_evolution_data_n29;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_double replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
+alter table part_change_string_group_double_n9 replace columns (insert_num int, c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, b STRING);
 
-insert into table part_change_string_group_double partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data WHERE insert_num = 111;
+insert into table part_change_string_group_double_n9 partition(part=1) SELECT insert_num, double1, double1, double1, 'new' FROM schema_evolution_data_n29 WHERE insert_num = 111;
 
-select insert_num,part,c1,c2,c3,b from part_change_string_group_double;
+select insert_num,part,c1,c2,c3,b from part_change_string_group_double_n9;
 
-drop table part_change_string_group_double;
+drop table part_change_string_group_double_n9;
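
The CHANGE COLUMNS subsections here all lean on the same mechanism: ALTER TABLE ... REPLACE COLUMNS without CASCADE rewrites only the table-level schema, so partitions written under the old column types keep their data as stored and the reader converts values on the fly (above, STRING-group text reinterpreted as DOUBLE). A minimal sketch of the pattern, with hypothetical names:

create table change_cols_demo (c1 string, b string) partitioned by (part int) stored as orc;
insert into table change_cols_demo partition(part=1) values ('123.4', 'original');
-- Without CASCADE only table metadata changes; partition part=1 still holds
-- STRING data, converted at read time (assumed: '123.4' reads as 123.4).
alter table change_cols_demo replace columns (c1 double, b string);
select part, c1, b from change_cols_demo;
drop table change_cols_demo;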
 
 ------------------------------------------------------------------------------------------
 -- SECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP
@@ -107,18 +107,18 @@ drop table part_change_string_group_double;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for DATE_GROUP -> STRING_GROUP: DATE,TIMESTAMP, (STRING, CHAR, CHAR trunc, VARCHAR, VARCHAR trunc)
 --
-CREATE TABLE part_change_date_group_string_group_date_timestamp(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT)  clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_change_date_group_string_group_date_timestamp_n9(insert_num int, c1 DATE, c2 DATE, c3 DATE, c4 DATE, c5 DATE, c6 TIMESTAMP, c7 TIMESTAMP, c8 TIMESTAMP, c9 TIMESTAMP, c10 TIMESTAMP, b STRING) PARTITIONED BY(part INT)  clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data;
+insert into table part_change_date_group_string_group_date_timestamp_n9 partition(part=1) SELECT insert_num, date1, date1, date1, date1, date1, timestamp1, timestamp1, timestamp1, timestamp1, timestamp1, 'original' FROM schema_evolution_data_n29;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_date_group_string_group_date_timestamp replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
+alter table part_change_date_group_string_group_date_timestamp_n9 replace columns(insert_num int, c1 STRING, c2 CHAR(50), c3 CHAR(15), c4 VARCHAR(50), c5 VARCHAR(15), c6 STRING, c7 CHAR(50), c8 CHAR(15), c9 VARCHAR(50), c10 VARCHAR(15), b STRING);
 
-insert into table part_change_date_group_string_group_date_timestamp partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
+insert into table part_change_date_group_string_group_date_timestamp_n9 partition(part=1) VALUES (111, 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'new');
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_date_group_string_group_date_timestamp_n9;
 
-drop table part_change_date_group_string_group_date_timestamp;
+drop table part_change_date_group_string_group_date_timestamp_n9;
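
Note that the CHAR(15)/VARCHAR(15) targets above are deliberately shorter than a rendered timestamp, so the conversion both stringifies and truncates. A hedged sketch, hypothetical names:

create table ts_to_char_demo (c1 timestamp) partitioned by (part int) stored as orc;
insert into table ts_to_char_demo partition(part=1) values ('2018-05-21 20:24:55');
-- Assumed rendering: CHAR(15) keeps the first 15 characters of the
-- formatted value, e.g. '2018-05-21 20:2'.
alter table ts_to_char_demo replace columns (c1 char(15));
select part, c1 from ts_to_char_demo;
drop table ts_to_char_demo;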
 
 
 
@@ -134,36 +134,36 @@ drop table part_change_date_group_string_group_date_timestamp;
 --           (TINYINT, SMALLINT, INT, BIGINT), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_multi_ints_string_group_n9(insert_num int,
              c1 tinyint, c2 smallint, c3 int, c4 bigint,
              c5 tinyint, c6 smallint, c7 int, c8 bigint, c9 tinyint, c10 smallint, c11 int, c12 bigint,
              c13 tinyint, c14 smallint, c15 int, c16 bigint, c17 tinyint, c18 smallint, c19 int, c20 bigint,
              b STRING) PARTITIONED BY(part INT);
 
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n9 partition(part=1) SELECT insert_num,
              tinyint1, smallint1, int1, bigint1,
              tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1,
              tinyint1, smallint1, int1, bigint1, tinyint1, smallint1, int1, bigint1,
-             'original' FROM schema_evolution_data;
+             'original' FROM schema_evolution_data_n29;
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n9;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_multi_ints_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_multi_ints_string_group_n9 replace columns (insert_num int,
              c1 STRING, c2 STRING, c3 STRING, c4 STRING,
              c5 CHAR(50), c6 CHAR(50), c7 CHAR(50), c8 CHAR(50), c9 CHAR(5), c10 CHAR(5), c11 CHAR(5), c12 CHAR(5),
              c13 VARCHAR(50), c14 VARCHAR(50), c15 VARCHAR(50), c16 VARCHAR(50), c17 VARCHAR(5), c18 VARCHAR(5), c19 VARCHAR(5), c20 VARCHAR(5),
              b STRING) ;
 
-insert into table part_change_numeric_group_string_group_multi_ints_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_multi_ints_string_group_n9 partition(part=1) VALUES (111,
             'filler', 'filler', 'filler', 'filler',
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
             'new');
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from part_change_numeric_group_string_group_multi_ints_string_group_n9;
 
-drop table part_change_numeric_group_string_group_multi_ints_string_group;
+drop table part_change_numeric_group_string_group_multi_ints_string_group_n9;
 
 
 
@@ -174,36 +174,36 @@ drop table part_change_numeric_group_string_group_multi_ints_string_group;
 --            (DECIMAL, FLOAT, DOUBLE), VARCHAR and VARCHAR trunc
 --
 --
-CREATE TABLE part_change_numeric_group_string_group_floating_string_group(insert_num int,
+CREATE TABLE part_change_numeric_group_string_group_floating_string_group_n9(insert_num int,
               c1 decimal(38,18), c2 float, c3 double,
               c4 decimal(38,18), c5 float, c6 double, c7 decimal(38,18), c8 float, c9 double,
               c10 decimal(38,18), c11 float, c12 double, c13 decimal(38,18), c14 float, c15 double,
               b STRING) PARTITIONED BY(part INT);
 
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) SELECT insert_num,
+insert into table part_change_numeric_group_string_group_floating_string_group_n9 partition(part=1) SELECT insert_num,
               decimal1, float1, double1,
               decimal1, float1, double1, decimal1, float1, double1,
               decimal1, float1, double1, decimal1, float1, double1,
-             'original' FROM schema_evolution_data;
+             'original' FROM schema_evolution_data_n29;
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n9;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_numeric_group_string_group_floating_string_group replace columns (insert_num int,
+alter table part_change_numeric_group_string_group_floating_string_group_n9 replace columns (insert_num int,
               c1 STRING, c2 STRING, c3 STRING,
               c4 CHAR(50), c5 CHAR(50), c6 CHAR(50), c7 CHAR(7), c8 CHAR(7), c9 CHAR(7),
               c10 VARCHAR(50), c11 VARCHAR(50), c12 VARCHAR(50), c13 VARCHAR(7), c14 VARCHAR(7), c15 VARCHAR(7),
               b STRING);
 
-insert into table part_change_numeric_group_string_group_floating_string_group partition(part=1) VALUES (111,
+insert into table part_change_numeric_group_string_group_floating_string_group_n9 partition(part=1) VALUES (111,
              'filler', 'filler', 'filler',
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'filler', 'filler', 'filler', 'filler', 'filler', 'filler',
              'new');
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from part_change_numeric_group_string_group_floating_string_group_n9;
 
-drop table part_change_numeric_group_string_group_floating_string_group;
+drop table part_change_numeric_group_string_group_floating_string_group_n9;
 
 
 
@@ -215,34 +215,34 @@ drop table part_change_numeric_group_string_group_floating_string_group;
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for STRING_GROUP -> STRING_GROUP: STRING, (CHAR, CHAR trunc, VARCHAR, VARCHAR trunc) and
 --      CHAR, (VARCHAR, VARCHAR trunc, STRING) and VARCHAR, (CHAR, CHAR trunc, STRING)
 --
-CREATE TABLE part_change_string_group_string_group_string(insert_num int,
+CREATE TABLE part_change_string_group_string_group_string_n9(insert_num int,
            c1 string, c2 string, c3 string, c4 string,
            c5 CHAR(50), c6 CHAR(50), c7 CHAR(50),
            c8 VARCHAR(50), c9 VARCHAR(50), c10 VARCHAR(50), b STRING) PARTITIONED BY(part INT)  clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table part_change_string_group_string_group_string partition(part=1) SELECT insert_num,
+insert into table part_change_string_group_string_group_string_n9 partition(part=1) SELECT insert_num,
            string2, string2, string2, string2,
            string2, string2, string2,
            string2, string2, string2,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n29;
 
-select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,b from part_change_string_group_string_group_string_n9;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_string_group_string_group_string replace columns (insert_num int,
+alter table part_change_string_group_string_group_string_n9 replace columns (insert_num int,
            c1 CHAR(50), c2 CHAR(9), c3 VARCHAR(50), c4 CHAR(9),
            c5 VARCHAR(50), c6 VARCHAR(9), c7 STRING,
            c8 CHAR(50), c9 CHAR(9), c10 STRING, b STRING) ;
 
-insert into table part_change_string_group_string_group_string partition(part=1) VALUES (111,
+insert into table part_change_string_group_string_group_string_n9 partition(part=1) VALUES (111,
           'filler', 'filler', 'filler', 'filler',
           'filler', 'filler', 'filler',
           'filler', 'filler', 'filler',
           'new');
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from part_change_string_group_string_group_string_n9;
 
-drop table part_change_string_group_string_group_string;
+drop table part_change_string_group_string_group_string_n9;
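
Within the string group itself the conversions are re-typings rather than reinterpretations; the observable effects are CHAR/VARCHAR length truncation and CHAR blank-padding. A small sketch (hypothetical names):

create table str_retype_demo (c1 string) partitioned by (part int) stored as orc;
insert into table str_retype_demo partition(part=1) values ('original value');
-- STRING -> CHAR(9): values longer than 9 characters come back truncated
-- ('original value' is assumed to read as 'original '), shorter values padded.
alter table str_retype_demo replace columns (c1 char(9));
select part, c1 from str_retype_demo;
drop table str_retype_demo;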
 
 
 ------------------------------------------------------------------------------------------
@@ -256,40 +256,40 @@ drop table part_change_string_group_string_group_string;
 --          INT, (BIGINT, DECIMAL, FLOAT, DOUBLE) and
 --          BIGINT, (DECIMAL, FLOAT, DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9(insert_num int,
                                 c1 tinyint, c2 tinyint, c3 tinyint, c4 tinyint, c5 tinyint, c6 tinyint,
                                 c7 smallint, c8 smallint, c9 smallint, c10 smallint, c11 smallint,
                                 c12 int, c13 int, c14 int, c15 int,
                                 c16 bigint, c17 bigint, c18 bigint,
                                 b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 partition(part=1) SELECT insert_num,
                                 tinyint1, tinyint1, tinyint1, tinyint1, tinyint1, tinyint1,
                                 smallint1, smallint1, smallint1, smallint1, smallint1,
                                 int1, int1, int1, int1,
                                 bigint1, bigint1, bigint1, 
-                                'original' FROM schema_evolution_data;
+                                'original' FROM schema_evolution_data_n29;
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint replace columns (insert_num int,
+alter table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 replace columns (insert_num int,
              c1 SMALLINT, c2 INT, c3 BIGINT, c4 decimal(38,18), c5 FLOAT, c6 DOUBLE,
              c7 INT, c8 BIGINT, c9 decimal(38,18), c10 FLOAT, c11 DOUBLE,
              c12 BIGINT, c13 decimal(38,18), c14 FLOAT, c15 DOUBLE,
              c16 decimal(38,18), c17 FLOAT, c18 DOUBLE,
              b STRING) ;
 
-insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint partition(part=1) VALUES (111,
+insert into table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9 partition(part=1) VALUES (111,
             7000, 80000, 90000000, 1234.5678, 9876.543, 789.321,
             80000, 90000000, 1234.5678, 9876.543, 789.321,
             90000000, 1234.5678, 9876.543, 789.321,
             1234.5678, 9876.543, 789.321,
            'new');
 
-select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b from part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9;
 
-drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
+drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint_n9;
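
The integer legs of this "lower to higher" matrix are pure widenings and therefore value-preserving (every TINYINT value fits in SMALLINT, and so on up to BIGINT); only the decimal/float/double targets can lose precision. A minimal sketch of the widening case, hypothetical names:

create table widen_demo (c1 tinyint) partitioned by (part int) stored as orc;
insert into table widen_demo partition(part=1) values (100);
-- Widening tinyint -> bigint: the old partition's data reads back unchanged,
-- and the column now accepts values beyond the tinyint range.
alter table widen_demo replace columns (c1 bigint);
insert into table widen_demo partition(part=1) values (90000000);
select part, c1 from widen_demo;
drop table widen_demo;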
 
 
 
@@ -298,23 +298,23 @@ drop table part_change_lower_to_higher_numeric_group_tinyint_to_bigint;
 --          DECIMAL, (FLOAT, DOUBLE) and
 --          FLOAT, (DOUBLE)
 --
-CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float(insert_num int,
+CREATE TABLE part_change_lower_to_higher_numeric_group_decimal_to_float_n9(insert_num int,
            c1 decimal(38,18), c2 decimal(38,18),
            c3 float,
            b STRING) PARTITIONED BY(part INT) clustered by (c1) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) SELECT insert_num,
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n9 partition(part=1) SELECT insert_num,
            decimal1, decimal1,
            float1,
-          'original' FROM schema_evolution_data;
+          'original' FROM schema_evolution_data_n29;
 
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n9;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_lower_to_higher_numeric_group_decimal_to_float replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
+alter table part_change_lower_to_higher_numeric_group_decimal_to_float_n9 replace columns (insert_num int, c1 float, c2 double, c3 DOUBLE, b STRING) ;
 
-insert into table part_change_lower_to_higher_numeric_group_decimal_to_float partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
+insert into table part_change_lower_to_higher_numeric_group_decimal_to_float_n9 partition(part=1) VALUES (111, 1234.5678, 9876.543, 1234.5678, 'new');
 
-select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float;
+select insert_num,part,c1,c2,c3,b from part_change_lower_to_higher_numeric_group_decimal_to_float_n9;
 
-drop table part_change_lower_to_higher_numeric_group_decimal_to_float;
\ No newline at end of file
+drop table part_change_lower_to_higher_numeric_group_decimal_to_float_n9;
\ No newline at end of file