Posted to commits@hive.apache.org by jc...@apache.org on 2018/05/21 20:25:17 UTC

[40/51] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)
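
Why the rename matters: the qtest driver can schedule many .q files into the same batch against a shared metastore/warehouse, so two tests that both create a table named t1 (or t, src2, over10k, ...) can collide when they run together. Suffixing each table with a per-test marker such as _n62 keeps names unique within a batch. A minimal sketch of the collision and the fix (hypothetical test pair, not part of this patch; t1_n62 is the suffixed name used in desc_tbl_part_cols.q below):

  -- test_a.q and test_b.q land in the same batch, same warehouse
  -- test_a.q, before the patch:
  create table t1 (a int, b string);   -- fails or clobbers state if test_b.q
                                       -- created its own t1 first
  -- after HIVE-19617 each test owns a uniquely suffixed name:
  create table t1_n62 (a int, b string);
  drop table t1_n62;                   -- cleanup touches only this test's table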

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/default_constraint.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/default_constraint.q b/ql/src/test/queries/clientpositive/default_constraint.q
index a86622b..981da63 100644
--- a/ql/src/test/queries/clientpositive/default_constraint.q
+++ b/ql/src/test/queries/clientpositive/default_constraint.q
@@ -7,51 +7,51 @@
  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
+CREATE TABLE numericDataType_n1(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
     d BIGINT DEFAULT  9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89)
     clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-DESC FORMATTED numericDataType;
+DESC FORMATTED numericDataType_n1;
 
-EXPLAIN INSERT INTO numericDataType(a) values(3Y);
-INSERT INTO numericDataType(a) values(3Y);
-SELECT * FROM numericDataType;
+EXPLAIN INSERT INTO numericDataType_n1(a) values(3Y);
+INSERT INTO numericDataType_n1(a) values(3Y);
+SELECT * FROM numericDataType_n1;
 
-EXPLAIN INSERT INTO numericDataType(e,f) values(4.5, 678.4);
-INSERT INTO numericDataType(e,f) values(4.5, 678.4);
-SELECT * FROM numericDataType;
+EXPLAIN INSERT INTO numericDataType_n1(e,f) values(4.5, 678.4);
+INSERT INTO numericDataType_n1(e,f) values(4.5, 678.4);
+SELECT * FROM numericDataType_n1;
 
-DROP TABLE numericDataType;
+DROP TABLE numericDataType_n1;
 
   -- Date/time
-CREATE TABLE table1(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT TIMESTAMP'2016-02-22 12:45:07.000000000',
+CREATE TABLE table1_n16(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT TIMESTAMP'2016-02-22 12:45:07.000000000',
     tz timestamp with local time zone DEFAULT TIMESTAMPLOCALTZ'2016-01-03 12:26:34 America/Los_Angeles',
     d1 DATE DEFAULT current_date() ENABLE, t1 TIMESTAMP DEFAULT current_timestamp() DISABLE);
-DESC FORMATTED table1;
+DESC FORMATTED table1_n16;
 
-EXPLAIN INSERT INTO table1(t) values ("1985-12-31 12:45:07");
-INSERT INTO table1(t) values ("1985-12-31 12:45:07");
-SELECT d, t, tz,d1=current_date(), t1 from table1;
+EXPLAIN INSERT INTO table1_n16(t) values ("1985-12-31 12:45:07");
+INSERT INTO table1_n16(t) values ("1985-12-31 12:45:07");
+SELECT d, t, tz,d1=current_date(), t1 from table1_n16;
 
-EXPLAIN INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259');
-INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259');
-SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1;
+EXPLAIN INSERT INTO table1_n16(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259');
+INSERT INTO table1_n16(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259');
+SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1_n16;
 
-DROP TABLE table1;
+DROP TABLE table1_n16;
 
 -- string type
-CREATE TABLE table2(i STRING DEFAULT 'current_database()', j STRING DEFAULT current_user(),
+CREATE TABLE table2_n11(i STRING DEFAULT 'current_database()', j STRING DEFAULT current_user(),
     k STRING DEFAULT 'Current_User()', v varchar(350) DEFAULT cast('varchar_default_value' as varchar(350)),
     c char(20) DEFAULT cast('char_value' as char(20)))
     clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-DESC FORMATTED table2;
-EXPLAIN INSERT INTO table2(i) values('default');
-INSERT INTO table2(i) values('default');
-SELECT i,j=current_user(),k,v,c FROM table2;
+DESC FORMATTED table2_n11;
+EXPLAIN INSERT INTO table2_n11(i) values('default');
+INSERT INTO table2_n11(i) values('default');
+SELECT i,j=current_user(),k,v,c FROM table2_n11;
 
-EXPLAIN INSERT INTO table2(v, c) values('varchar_default2', 'char');
-INSERT INTO table2(v, c) values('varchar_default2', 'char');
-SELECT i,j=current_user(),k,v,c FROM table2;
-DROP TABLE table2;
+EXPLAIN INSERT INTO table2_n11(v, c) values('varchar_default2', 'char');
+INSERT INTO table2_n11(v, c) values('varchar_default2', 'char');
+SELECT i,j=current_user(),k,v,c FROM table2_n11;
+DROP TABLE table2_n11;
 
 
 -- misc type
@@ -67,7 +67,7 @@ SELECT b, b1 from misc;
 DROP TABLE misc;
 
 -- CAST
-CREATE table t11(i int default cast(cast(4 as double) as int),
+CREATE table t11_n2(i int default cast(cast(4 as double) as int),
     b1 boolean default cast ('true' as boolean), b2 int default cast (5.67 as int),
     b3 tinyint default cast (45 as tinyint), b4 float default cast (45.4 as float),
     b5 bigint default cast (567 as bigint), b6 smallint default cast (88 as smallint),
@@ -77,55 +77,55 @@ CREATE table t11(i int default cast(cast(4 as double) as int),
      ts timestamp default cast('2016-01-01 12:01:01' as timestamp),
      dc decimal(8,2) default cast(4.5 as decimal(8,2)),
      c2 double default cast(5 as double), c4 char(2) default cast(cast(cast('ab' as string) as varchar(2)) as char(2)));
-DESC FORMATTED t11;
-EXPLAIN INSERT INTO t11(c4) values('vi');
-INSERT INTO t11(c4) values('vi');
-SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11;
+DESC FORMATTED t11_n2;
+EXPLAIN INSERT INTO t11_n2(c4) values('vi');
+INSERT INTO t11_n2(c4) values('vi');
+SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11_n2;
 
-EXPLAIN INSERT INTO t11(b1,c4) values(true,'ga');
-INSERT INTO t11(c4) values('vi');
-SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11;
+EXPLAIN INSERT INTO t11_n2(b1,c4) values(true,'ga');
+INSERT INTO t11_n2(c4) values('vi');
+SELECT ts, tz1, dc, b1,b2,b3,b4,b5,b6,j=cast(current_timestamp() as varchar(50)), k=cast(current_user() as string), c2, c4 from t11_n2;
 
-DROP TABLE t11;
+DROP TABLE t11_n2;
 
 -- alter table
 -- drop constraint
-CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
+CREATE TABLE numericDataType_n1(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
     d BIGINT DEFAULT  9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89)
     clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint;
-DESC FORMATTED numericDataType;
+ALTER TABLE numericDataType_n1 DROP CONSTRAINT tinyint_constraint;
+DESC FORMATTED numericDataType_n1;
 
-EXPLAIN INSERT INTO numericDataType(b) values(456);
-INSERT INTO numericDataType(b) values(456);
-SELECT * from numericDataType;
+EXPLAIN INSERT INTO numericDataType_n1(b) values(456);
+INSERT INTO numericDataType_n1(b) values(456);
+SELECT * from numericDataType_n1;
 
 -- add another constraint on same column
-ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE;
-DESC FORMATTED numericDataType;
-EXPLAIN INSERT INTO numericDataType(b) values(56);
-INSERT INTO numericDataType(b) values(456);
-SELECT * from numericDataType;
+ALTER TABLE numericDataType_n1 ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE;
+DESC FORMATTED numericDataType_n1;
+EXPLAIN INSERT INTO numericDataType_n1(b) values(56);
+INSERT INTO numericDataType_n1(b) values(456);
+SELECT * from numericDataType_n1;
 
 -- alter table change column with constraint to add NOT NULL and then DEFAULT
-ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE;
-DESC FORMATTED numericDataType;
-ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 127Y ENABLE;
-DESC FORMATTED numericDataType;
-EXPLAIN INSERT INTO numericDataType(f) values(847.45); --plan should have both DEFAULT and NOT NULL
-INSERT INTO numericDataType(f) values(847.45);
-Select * from numericDataType;
-DESC FORMATTED numericDataType;
+ALTER TABLE numericDataType_n1 CHANGE a a TINYINT CONSTRAINT second_null_constraint NOT NULL ENABLE;
+DESC FORMATTED numericDataType_n1;
+ALTER TABLE numericDataType_n1 CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 127Y ENABLE;
+DESC FORMATTED numericDataType_n1;
+EXPLAIN INSERT INTO numericDataType_n1(f) values(847.45); --plan should have both DEFAULT and NOT NULL
+INSERT INTO numericDataType_n1(f) values(847.45);
+Select * from numericDataType_n1;
+DESC FORMATTED numericDataType_n1;
 
 -- drop constraint and add with same name again
-ALTER TABLE numericDataType DROP CONSTRAINT default_constraint;
-DESC FORMATTED numericDataType;
-ALTER TABLE numericDataType CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 108Y ENABLE;
-DESC FORMATTED numericDataType;
-EXPLAIN INSERT INTO numericDataType(f) values(847.45);
-INSERT INTO numericDataType(f) values(847.45);
-Select * from numericDataType;
-DROP TABLE numericDataType;
+ALTER TABLE numericDataType_n1 DROP CONSTRAINT default_constraint;
+DESC FORMATTED numericDataType_n1;
+ALTER TABLE numericDataType_n1 CHANGE a a TINYINT CONSTRAINT default_constraint DEFAULT 108Y ENABLE;
+DESC FORMATTED numericDataType_n1;
+EXPLAIN INSERT INTO numericDataType_n1(f) values(847.45);
+INSERT INTO numericDataType_n1(f) values(847.45);
+Select * from numericDataType_n1;
+DROP TABLE numericDataType_n1;
 
 -- create default with maximum length allowed for default val (255)
 create table t (i int, j string default
@@ -139,45 +139,45 @@ drop table t;
 -- partitioned table
 set hive.exec.dynamic.partition.mode=nonstrict;
 -- Table with partition
-CREATE TABLE tablePartitioned (a STRING NOT NULL ENFORCED, url STRING constraint bdc1 default 'http://localhost',
+CREATE TABLE tablePartitioned_n0 (a STRING NOT NULL ENFORCED, url STRING constraint bdc1 default 'http://localhost',
     c STRING NOT NULL ENFORCED)
     PARTITIONED BY (p1 STRING, p2 INT);
 
 -- Insert into
-explain INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint');
-INSERT INTO tablePartitioned partition(p1='today', p2=10) values('not', 'null', 'constraint');
-DROP TABLE tablePartitioned;
+explain INSERT INTO tablePartitioned_n0 partition(p1='today', p2=10) values('not', 'null', 'constraint');
+INSERT INTO tablePartitioned_n0 partition(p1='today', p2=10) values('not', 'null', 'constraint');
+DROP TABLE tablePartitioned_n0;
 
 -- try constraint with direct sql as false
 set hive.metastore.try.direct.sql=false;
-CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
+CREATE TABLE numericDataType_n1(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
     d BIGINT DEFAULT  9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f DECIMAL(9,2) DEFAULT 1234567.89)
     clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
-ALTER TABLE numericDataType DROP CONSTRAINT tinyint_constraint;
-DESC FORMATTED numericDataType;
+ALTER TABLE numericDataType_n1 DROP CONSTRAINT tinyint_constraint;
+DESC FORMATTED numericDataType_n1;
 
-EXPLAIN INSERT INTO numericDataType(b) values(456);
-INSERT INTO numericDataType(b) values(456);
-SELECT * from numericDataType;
+EXPLAIN INSERT INTO numericDataType_n1(b) values(456);
+INSERT INTO numericDataType_n1(b) values(456);
+SELECT * from numericDataType_n1;
 
 -- add another constraint on same column
-ALTER TABLE numericDataType ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE;
-DESC FORMATTED numericDataType;
-EXPLAIN INSERT INTO numericDataType(b) values(56);
-INSERT INTO numericDataType(b) values(456);
-SELECT * from numericDataType;
-DROP TABLE numericDataType;
+ALTER TABLE numericDataType_n1 ADD CONSTRAINT uk1 UNIQUE(a,b) DISABLE NOVALIDATE;
+DESC FORMATTED numericDataType_n1;
+EXPLAIN INSERT INTO numericDataType_n1(b) values(56);
+INSERT INTO numericDataType_n1(b) values(456);
+SELECT * from numericDataType_n1;
+DROP TABLE numericDataType_n1;
 
 -- Following all are existing BUGS
 -- BUG1: alter table change constraint doesn't work, so following not working
--- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint DEFAULT 1Y ENABLE; -- change default val
--- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y ENABLE; -- change constraint name
--- ALTER TABLE numericDataType change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y DISABLE; -- DISABLE constraint
+-- ALTER TABLE numericDataType_n1 change a a TINYINT CONSTRAINT default_constraint DEFAULT 1Y ENABLE; -- change default val
+-- ALTER TABLE numericDataType_n1 change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y ENABLE; -- change constraint name
+-- ALTER TABLE numericDataType_n1 change a a TINYINT CONSTRAINT default_constraint_second DEFAULT 1Y DISABLE; -- DISABLE constraint
 -- BUG2: ADD column not working
--- ALTER TABLE numericDataType add columns (dd double);
+-- ALTER TABLE numericDataType_n1 add columns (dd double);
 --BUG3: Following add multiple constraints
---ALTER TABLE numericDataType CHANGE c c INT DEFAULT cast(4.5 as INT);
+--ALTER TABLE numericDataType_n1 CHANGE c c INT DEFAULT cast(4.5 as INT);
 -- BUG4 Replace column doesn't work, so following not workiing
--- alter table numericDataType replace columns (a TINYINT);
+-- alter table numericDataType_n1 replace columns (a TINYINT);
 -- BUG5: select current_database() as default doesn't work
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/default_file_format.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/default_file_format.q b/ql/src/test/queries/clientpositive/default_file_format.q
index 24f4c17..40021e0 100644
--- a/ql/src/test/queries/clientpositive/default_file_format.q
+++ b/ql/src/test/queries/clientpositive/default_file_format.q
@@ -1,62 +1,62 @@
-create table t (c int);
+create table t_n2 (c int);
 
 set hive.default.fileformat.managed=orc;
 
 create table o (c int);
 
-create external table e (c int) location 'pfile://${system:test.tmp.dir}/foo';
+create external table e_n1 (c int) location 'pfile://${system:test.tmp.dir}/foo';
 
-create table i (c int) location 'pfile://${system:test.tmp.dir}/bar';
+create table i_n0 (c int) location 'pfile://${system:test.tmp.dir}/bar';
 
 set hive.default.fileformat=orc;
 
 create table io (c int);
 create external table e2 (c int) location 'pfile://${system:test.tmp.dir}/bar';
 
-describe formatted t;
+describe formatted t_n2;
 describe formatted o;
 describe formatted io;
-describe formatted e;
-describe formatted i;
+describe formatted e_n1;
+describe formatted i_n0;
 describe formatted e2;
 
-drop table t;
+drop table t_n2;
 drop table o;
 drop table io;
-drop table e;
-drop table i;
+drop table e_n1;
+drop table i_n0;
 drop table e2;
 
 set hive.default.fileformat=TextFile;
 set hive.default.fileformat.managed=none;
 
-create table t (c int);
+create table t_n2 (c int);
 
 set hive.default.fileformat.managed=parquet;
 
 create table o (c int);
 
-create external table e (c int) location 'pfile://${system:test.tmp.dir}/foo';
+create external table e_n1 (c int) location 'pfile://${system:test.tmp.dir}/foo';
 
-create table i (c int) location 'pfile://${system:test.tmp.dir}/bar';
+create table i_n0 (c int) location 'pfile://${system:test.tmp.dir}/bar';
 
 set hive.default.fileformat=parquet;
 
 create table io (c int);
 create external table e2 (c int) location 'pfile://${system:test.tmp.dir}/bar';
 
-describe formatted t;
+describe formatted t_n2;
 describe formatted o;
 describe formatted io;
-describe formatted e;
-describe formatted i;
+describe formatted e_n1;
+describe formatted i_n0;
 describe formatted e2;
 
-drop table t;
+drop table t_n2;
 drop table o;
 drop table io;
-drop table e;
-drop table i;
+drop table e_n1;
+drop table i_n0;
 drop table e2;
 
 set hive.default.fileformat=TextFile;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/deleteAnalyze.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/deleteAnalyze.q b/ql/src/test/queries/clientpositive/deleteAnalyze.q
index 5293ddf..c60da97 100644
--- a/ql/src/test/queries/clientpositive/deleteAnalyze.q
+++ b/ql/src/test/queries/clientpositive/deleteAnalyze.q
@@ -3,32 +3,32 @@ set hive.explain.user=true;
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/testdeci2;
 
-create table testdeci2(
+create table testdeci2_n0(
 id int,
 amount decimal(10,3),
 sales_tax decimal(10,3),
 item string)
 stored as orc location '${system:test.tmp.dir}/testdeci2';
 
-insert into table testdeci2 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2');
+insert into table testdeci2_n0 values(1,12.123,12345.123,'desk1'),(2,123.123,1234.123,'desk2');
 
-describe formatted testdeci2;
+describe formatted testdeci2_n0;
 
 dfs -rmr ${system:test.tmp.dir}/testdeci2/000000_0;
 
-describe formatted testdeci2 amount;
+describe formatted testdeci2_n0 amount;
 
-analyze table testdeci2 compute statistics for columns;
+analyze table testdeci2_n0 compute statistics for columns;
 
-describe formatted testdeci2;
+describe formatted testdeci2_n0;
 
 set hive.stats.fetch.column.stats=true;
 
-analyze table testdeci2 compute statistics for columns;
+analyze table testdeci2_n0 compute statistics for columns;
 
 explain
 select s.id,
 coalesce(d.amount,0) as sales,
 coalesce(d.sales_tax,0) as tax
-from testdeci2 s join testdeci2 d
+from testdeci2_n0 s join testdeci2_n0 d
 on s.item=d.item and d.id=2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q b/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
index 89e4931..7723af0 100644
--- a/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
+++ b/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
@@ -1,7 +1,7 @@
-create table t1 (a int, b string) partitioned by (c int, d string);
-describe t1;
+create table t1_n62 (a int, b string) partitioned by (c int, d string);
+describe t1_n62;
 
 set hive.display.partition.cols.separately=false;
-describe t1;
+describe t1_n62;
 
 set hive.display.partition.cols.separately=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/describe_comment_indent.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/describe_comment_indent.q b/ql/src/test/queries/clientpositive/describe_comment_indent.q
index 310b694..0458298 100644
--- a/ql/src/test/queries/clientpositive/describe_comment_indent.q
+++ b/ql/src/test/queries/clientpositive/describe_comment_indent.q
@@ -1,6 +1,6 @@
 -- test comment indent processing for multi-line comments
 
-CREATE TABLE test_table(
+CREATE TABLE test_table_n13(
     col1 INT COMMENT 'col1 one line comment',
     col2 STRING COMMENT 'col2
 two lines comment',
@@ -10,5 +10,5 @@ comment')
 COMMENT 'table comment
 two lines';
 
-DESCRIBE test_table;
-DESCRIBE FORMATTED test_table;
+DESCRIBE test_table_n13;
+DESCRIBE FORMATTED test_table_n13;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q b/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q
index 97fc0cd..c168bd6 100644
--- a/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q
+++ b/ql/src/test/queries/clientpositive/describe_formatted_view_partitioned_json.q
@@ -1,18 +1,18 @@
 --! qt:dataset:src
 set hive.ddl.output.format=json;
 
-DROP VIEW view_partitioned;
+DROP VIEW view_partitioned_n0;
 
-CREATE VIEW view_partitioned
+CREATE VIEW view_partitioned_n0
 PARTITIONED ON (value)
 AS
 SELECT key, value
 FROM src
 WHERE key=86;
 
-ALTER VIEW view_partitioned
+ALTER VIEW view_partitioned_n0
 ADD PARTITION (value='val_86');
 
-DESCRIBE FORMATTED view_partitioned PARTITION (value='val_86');
+DESCRIBE FORMATTED view_partitioned_n0 PARTITION (value='val_86');
 
-DROP VIEW view_partitioned;
+DROP VIEW view_partitioned_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/describe_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/describe_table.q b/ql/src/test/queries/clientpositive/describe_table.q
index 4016236..69fec92 100644
--- a/ql/src/test/queries/clientpositive/describe_table.q
+++ b/ql/src/test/queries/clientpositive/describe_table.q
@@ -42,7 +42,7 @@ CREATE TABLE IF NOT EXISTS name1 (name1 int, name2 string) PARTITIONED BY (name3
 ALTER TABLE name1 ADD PARTITION (name3=1);
 CREATE TABLE IF NOT EXISTS name2 (name3 int, name4 string);
 use name2;
-CREATE TABLE IF NOT EXISTS table1 (col1 int, col2 string);
+CREATE TABLE IF NOT EXISTS table1_n18 (col1 int, col2 string);
 
 use default;
 DESCRIBE name1.name1;
@@ -66,23 +66,23 @@ DESCRIBE name1.name2;
 DESCRIBE name1.name2 name3;
 DESCRIBE name1.name2 name4;
 
-DESCRIBE name2.table1;
-DESCRIBE name2.table1 col1;
-DESCRIBE name2.table1 col2;
+DESCRIBE name2.table1_n18;
+DESCRIBE name2.table1_n18 col1;
+DESCRIBE name2.table1_n18 col2;
 use name2;
-DESCRIBE table1;
-DESCRIBE table1 col1;
-DESCRIBE table1 col2;
+DESCRIBE table1_n18;
+DESCRIBE table1_n18 col1;
+DESCRIBE table1_n18 col2;
 
-DESCRIBE name2.table1;
-DESCRIBE name2.table1 col1;
-DESCRIBE name2.table1 col2;
+DESCRIBE name2.table1_n18;
+DESCRIBE name2.table1_n18 col1;
+DESCRIBE name2.table1_n18 col2;
 
-DROP TABLE IF EXISTS table1;
+DROP TABLE IF EXISTS table1_n18;
 use name1;
 DROP TABLE IF EXISTS name1;
 DROP TABLE IF EXISTS name2;
 use name2;
-DROP TABLE IF EXISTS table1;
+DROP TABLE IF EXISTS table1_n18;
 DROP DATABASE IF EXISTS name1;
 DROP DATABASE IF EXISTS name2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
index 4b14673..84ed8f3 100644
--- a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
+++ b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
@@ -8,18 +8,18 @@ set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 
 
-CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS;
+CREATE TABLE bucket2_1_n0(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS;
 
 explain extended
-insert overwrite table bucket2_1
+insert overwrite table bucket2_1_n0
 select * from src;
 
-insert overwrite table bucket2_1
+insert overwrite table bucket2_1_n0
 select * from src;
 
 explain
-select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
+select * from bucket2_1_n0 tablesample (bucket 1 out of 2) s order by key;
 
-select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key;
+select * from bucket2_1_n0 tablesample (bucket 1 out of 2) s order by key;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q b/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q
index debd8a9..7ef651c 100644
--- a/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q
+++ b/ql/src/test/queries/clientpositive/display_colstats_tbllvl.q
@@ -1,6 +1,6 @@
-DROP TABLE IF EXISTS UserVisits_web_text_none;
+DROP TABLE IF EXISTS UserVisits_web_text_none_n0;
 
-CREATE TABLE UserVisits_web_text_none (
+CREATE TABLE UserVisits_web_text_none_n0 (
   sourceIP string,
   destURL string,
   visitDate string,
@@ -12,23 +12,23 @@ CREATE TABLE UserVisits_web_text_none (
   avgTimeOnSite int)
 row format delimited fields terminated by '|'  stored as textfile;
 
-LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
+LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0;
 
-desc extended UserVisits_web_text_none sourceIP;
-desc formatted UserVisits_web_text_none sourceIP;
+desc extended UserVisits_web_text_none_n0 sourceIP;
+desc formatted UserVisits_web_text_none_n0 sourceIP;
 
 explain
-analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
 
 explain extended
-analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
 
-analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
-desc formatted UserVisits_web_text_none sourceIP;
-desc formatted UserVisits_web_text_none avgTimeOnSite;
-desc formatted UserVisits_web_text_none adRevenue;
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
+desc formatted UserVisits_web_text_none_n0 sourceIP;
+desc formatted UserVisits_web_text_none_n0 avgTimeOnSite;
+desc formatted UserVisits_web_text_none_n0 adRevenue;
 
-CREATE TABLE empty_tab(
+CREATE TABLE empty_tab_n0(
    a int,
    b double,
    c string,
@@ -36,18 +36,18 @@ CREATE TABLE empty_tab(
    e binary)
 row format delimited fields terminated by '|'  stored as textfile;
 
-desc formatted empty_tab a;
+desc formatted empty_tab_n0 a;
 explain
-analyze table empty_tab compute statistics for columns a,b,c,d,e;
+analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e;
 
-analyze table empty_tab compute statistics for columns a,b,c,d,e;
-desc formatted empty_tab a;
-desc formatted empty_tab b;
+analyze table empty_tab_n0 compute statistics for columns a,b,c,d,e;
+desc formatted empty_tab_n0 a;
+desc formatted empty_tab_n0 b;
 
 CREATE DATABASE test;
 USE test;
 
-CREATE TABLE UserVisits_web_text_none (
+CREATE TABLE UserVisits_web_text_none_n0 (
   sourceIP string,
   destURL string,
   visitDate string,
@@ -59,17 +59,17 @@ CREATE TABLE UserVisits_web_text_none (
   avgTimeOnSite int)
 row format delimited fields terminated by '|'  stored as textfile;
 
-LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
+LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none_n0;
 
-desc extended UserVisits_web_text_none sourceIP;
-desc extended test.UserVisits_web_text_none sourceIP;
-desc extended default.UserVisits_web_text_none sourceIP;
-desc formatted UserVisits_web_text_none sourceIP;
-desc formatted test.UserVisits_web_text_none sourceIP;
-desc formatted default.UserVisits_web_text_none sourceIP;
+desc extended UserVisits_web_text_none_n0 sourceIP;
+desc extended test.UserVisits_web_text_none_n0 sourceIP;
+desc extended default.UserVisits_web_text_none_n0 sourceIP;
+desc formatted UserVisits_web_text_none_n0 sourceIP;
+desc formatted test.UserVisits_web_text_none_n0 sourceIP;
+desc formatted default.UserVisits_web_text_none_n0 sourceIP;
 
-analyze table UserVisits_web_text_none compute statistics for columns sKeyword;
-desc extended UserVisits_web_text_none sKeyword;
-desc formatted UserVisits_web_text_none sKeyword;
-desc formatted test.UserVisits_web_text_none sKeyword;
+analyze table UserVisits_web_text_none_n0 compute statistics for columns sKeyword;
+desc extended UserVisits_web_text_none_n0 sKeyword;
+desc formatted UserVisits_web_text_none_n0 sKeyword;
+desc formatted test.UserVisits_web_text_none_n0 sKeyword;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/distinct_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/distinct_stats.q b/ql/src/test/queries/clientpositive/distinct_stats.q
index 0cc795b..1a95caa 100644
--- a/ql/src/test/queries/clientpositive/distinct_stats.q
+++ b/ql/src/test/queries/clientpositive/distinct_stats.q
@@ -2,20 +2,20 @@
 set hive.stats.autogather=true;
 
 set hive.compute.query.using.stats=true;
-create table t1 (a string, b string);
+create table t1_n11 (a string, b string);
 
-insert into table t1 select * from src;
+insert into table t1_n11 select * from src;
 
-analyze table t1 compute statistics for columns a,b;
+analyze table t1_n11 compute statistics for columns a,b;
 
 explain 
-select count(distinct b) from t1 group by a;
+select count(distinct b) from t1_n11 group by a;
 
 explain 
-select distinct(b) from t1;
+select distinct(b) from t1_n11;
 
 explain 
-select a, count(*) from t1 group by a;
+select a, count(*) from t1_n11 group by a;
 
-drop table t1;
+drop table t1_n11;
 set hive.compute.query.using.stats = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/distinct_windowing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/distinct_windowing.q b/ql/src/test/queries/clientpositive/distinct_windowing.q
index ca863cb..2ce1aca 100644
--- a/ql/src/test/queries/clientpositive/distinct_windowing.q
+++ b/ql/src/test/queries/clientpositive/distinct_windowing.q
@@ -1,6 +1,6 @@
-drop table over10k;
+drop table over10k_n15;
 
-create table over10k(
+create table over10k_n15(
            t tinyint,
            si smallint,
            i int,
@@ -15,25 +15,25 @@ create table over10k(
        row format delimited
        fields terminated by '|';
 
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n15;
 
 explain
-select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10;
+select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10;
 
-select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10;
+select distinct first_value(t) over ( partition by si order by i ) from over10k_n15 limit 10;
 
 explain
 select distinct last_value(i) over ( partition by si order by i )
-from over10k limit 10;
+from over10k_n15 limit 10;
 
 select distinct last_value(i) over ( partition by si order by i )
-from over10k limit 10;
+from over10k_n15 limit 10;
 
 explain
 select distinct last_value(i) over ( partition by si order by i ),
                 first_value(t)  over ( partition by si order by i )
-from over10k limit 50;
+from over10k_n15 limit 50;
 
 select distinct last_value(i) over ( partition by si order by i ),
                 first_value(t)  over ( partition by si order by i )
-from over10k limit 50;
+from over10k_n15 limit 50;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q b/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q
index 36f071f..0748b80 100644
--- a/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q
+++ b/ql/src/test/queries/clientpositive/distinct_windowing_no_cbo.q
@@ -1,8 +1,8 @@
 set hive.cbo.enable=false;
 
-drop table over10k;
+drop table over10k_n14;
 
-create table over10k(
+create table over10k_n14(
            t tinyint,
            si smallint,
            i int,
@@ -17,47 +17,47 @@ create table over10k(
        row format delimited
        fields terminated by '|';
 
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n14;
 
 explain
-select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10;
+select distinct first_value(t) over ( partition by si order by i ) from over10k_n14 limit 10;
 
-select distinct first_value(t) over ( partition by si order by i ) from over10k limit 10;
+select distinct first_value(t) over ( partition by si order by i ) from over10k_n14 limit 10;
 
 explain
 select distinct last_value(i) over ( partition by si order by i )
-from over10k limit 10;
+from over10k_n14 limit 10;
 
 select distinct last_value(i) over ( partition by si order by i )
-from over10k limit 10;
+from over10k_n14 limit 10;
 
 explain
 select distinct last_value(i) over ( partition by si order by i ),
                 first_value(t)  over ( partition by si order by i )
-from over10k limit 50;
+from over10k_n14 limit 50;
 
 select distinct last_value(i) over ( partition by si order by i ),
                 first_value(t)  over ( partition by si order by i )
-from over10k limit 50;
+from over10k_n14 limit 50;
 
 explain
 select si, max(f) mf, rank() over ( partition by si order by mf )
-FROM over10k
+FROM over10k_n14
 GROUP BY si
 HAVING max(f) > 0
 limit 50;
 
 select si, max(f) mf, rank() over ( partition by si order by mf )
-FROM over10k
+FROM over10k_n14
 GROUP BY si
 HAVING max(f) > 0
 limit 50;
 
 explain
 select distinct si, rank() over ( partition by si order by i )
-FROM over10k
+FROM over10k_n14
 limit 50;
 
 select distinct si, rank() over ( partition by si order by i )
-FROM over10k
+FROM over10k_n14
 limit 50;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dp_counter_mm.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dp_counter_mm.q b/ql/src/test/queries/clientpositive/dp_counter_mm.q
index 8f1afc1..91c4f42 100644
--- a/ql/src/test/queries/clientpositive/dp_counter_mm.q
+++ b/ql/src/test/queries/clientpositive/dp_counter_mm.q
@@ -5,48 +5,48 @@ set hive.exec.max.dynamic.partitions=200;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-drop table src2;
-create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+drop table src2_n5;
+create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 
 -- regular insert overwrite + insert into
 
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-insert overwrite table src2 partition (value) select * from src where key < 100;
-insert into table src2 partition (value) select * from src where key < 200;
+insert overwrite table src2_n5 partition (value) select * from src where key < 100;
+insert into table src2_n5 partition (value) select * from src where key < 200;
 
-drop table src2;
-create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+drop table src2_n5;
+create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 
-insert overwrite table src2 partition (value) select * from src where key < 200;
-insert into table src2 partition (value) select * from src where key < 300;
+insert overwrite table src2_n5 partition (value) select * from src where key < 200;
+insert into table src2_n5 partition (value) select * from src where key < 300;
 
 -- multi insert overwrite + insert into
 
-drop table src2;
-drop table src3;
-create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
-create table src3 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+drop table src2_n5;
+drop table src3_n1;
+create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+create table src3_n1 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 
 from src
-insert overwrite table src2 partition (value) select * where key < 100
-insert overwrite table src3 partition (value) select * where key >= 100 and key < 200;
+insert overwrite table src2_n5 partition (value) select * where key < 100
+insert overwrite table src3_n1 partition (value) select * where key >= 100 and key < 200;
 
 from src
-insert into table src2 partition (value) select * where key < 100
-insert into table src3 partition (value) select * where key >= 100 and key < 300;
+insert into table src2_n5 partition (value) select * where key < 100
+insert into table src3_n1 partition (value) select * where key >= 100 and key < 300;
 
 -- union all insert overwrite + insert into
 
-drop table src2;
-create table src2 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+drop table src2_n5;
+create table src2_n5 (key int) partitioned by (value string) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 
-insert overwrite table src2 partition (value)
+insert overwrite table src2_n5 partition (value)
 select temps.* from (
   select * from src where key < 100
   union all
   select * from src where key >= 100 and key < 200) temps;
 
-insert into table src2 partition (value)
+insert into table src2_n5 partition (value)
 select temps.* from (
   select * from src where key < 100
   union all

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/dp_counter_non_mm.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dp_counter_non_mm.q b/ql/src/test/queries/clientpositive/dp_counter_non_mm.q
index 960f7fc..561ae6e 100644
--- a/ql/src/test/queries/clientpositive/dp_counter_non_mm.q
+++ b/ql/src/test/queries/clientpositive/dp_counter_non_mm.q
@@ -3,48 +3,48 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.exec.max.dynamic.partitions.pernode=200;
 set hive.exec.max.dynamic.partitions=200;
 
-drop table src2;
-create table src2 (key int) partitioned by (value string);
+drop table src2_n3;
+create table src2_n3 (key int) partitioned by (value string);
 
 -- regular insert overwrite + insert into
 
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-insert overwrite table src2 partition (value) select * from src where key < 100;
-insert into table src2 partition (value) select * from src where key < 200;
+insert overwrite table src2_n3 partition (value) select * from src where key < 100;
+insert into table src2_n3 partition (value) select * from src where key < 200;
 
-drop table src2;
-create table src2 (key int) partitioned by (value string);
+drop table src2_n3;
+create table src2_n3 (key int) partitioned by (value string);
 
-insert overwrite table src2 partition (value) select * from src where key < 200;
-insert into table src2 partition (value) select * from src where key < 300;
+insert overwrite table src2_n3 partition (value) select * from src where key < 200;
+insert into table src2_n3 partition (value) select * from src where key < 300;
 
 -- multi insert overwrite + insert into
 
-drop table src2;
-drop table src3;
-create table src2 (key int) partitioned by (value string);
-create table src3 (key int) partitioned by (value string);
+drop table src2_n3;
+drop table src3_n0;
+create table src2_n3 (key int) partitioned by (value string);
+create table src3_n0 (key int) partitioned by (value string);
 
 from src
-insert overwrite table src2 partition (value) select * where key < 100
-insert overwrite table src3 partition (value) select * where key >= 100 and key < 200;
+insert overwrite table src2_n3 partition (value) select * where key < 100
+insert overwrite table src3_n0 partition (value) select * where key >= 100 and key < 200;
 
 from src
-insert into table src2 partition (value) select * where key < 100
-insert into table src3 partition (value) select * where key >= 100 and key < 300;
+insert into table src2_n3 partition (value) select * where key < 100
+insert into table src3_n0 partition (value) select * where key >= 100 and key < 300;
 
 -- union all insert overwrite + insert into
 
-drop table src2;
-create table src2 (key int) partitioned by (value string);
+drop table src2_n3;
+create table src2_n3 (key int) partitioned by (value string);
 
-insert overwrite table src2 partition (value)
+insert overwrite table src2_n3 partition (value)
 select temps.* from (
   select * from src where key < 100
   union all
   select * from src where key >= 100 and key < 200) temps;
 
-insert into table src2 partition (value)
+insert into table src2_n3 partition (value)
 select temps.* from (
   select * from src where key < 100
   union all

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q b/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q
index e46460f..ba67fd7 100644
--- a/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q
+++ b/ql/src/test/queries/clientpositive/drop_database_removes_partition_dirs.q
@@ -7,15 +7,15 @@ CREATE DATABASE test_database;
 
 USE test_database;
 
-CREATE TABLE test_table (key STRING, value STRING)
+CREATE TABLE test_table_n12 (key STRING, value STRING)
 PARTITIONED BY (part STRING)
 STORED AS RCFILE
 LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table';
 
-ALTER TABLE test_table ADD PARTITION (part = '1')
+ALTER TABLE test_table_n12 ADD PARTITION (part = '1')
 LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table2/part=1';
 
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n12 PARTITION (part = '1')
 SELECT * FROM default.src;
 
 dfs -ls ${system:test.tmp.dir}/drop_database_removes_partition_dirs_table2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_multi_partitions.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_multi_partitions.q b/ql/src/test/queries/clientpositive/drop_multi_partitions.q
index 7ee7ae7..65c60af 100644
--- a/ql/src/test/queries/clientpositive/drop_multi_partitions.q
+++ b/ql/src/test/queries/clientpositive/drop_multi_partitions.q
@@ -1,23 +1,23 @@
 create database dmp;
 
-create table dmp.mp (a string) partitioned by (b string, c string);
+create table dmp.mp_n0 (a string) partitioned by (b string, c string);
 
-alter table dmp.mp add partition (b='1', c='1');
-alter table dmp.mp add partition (b='1', c='2');
-alter table dmp.mp add partition (b='2', c='2');
+alter table dmp.mp_n0 add partition (b='1', c='1');
+alter table dmp.mp_n0 add partition (b='1', c='2');
+alter table dmp.mp_n0 add partition (b='2', c='2');
 
-show partitions dmp.mp;
+show partitions dmp.mp_n0;
 
-explain extended alter table dmp.mp drop partition (b='1');
-alter table dmp.mp drop partition (b='1');
+explain extended alter table dmp.mp_n0 drop partition (b='1');
+alter table dmp.mp_n0 drop partition (b='1');
 
-show partitions dmp.mp;
+show partitions dmp.mp_n0;
 
 set hive.exec.drop.ignorenonexistent=false;
-alter table dmp.mp drop if exists partition (b='3');
+alter table dmp.mp_n0 drop if exists partition (b='3');
 
-show partitions dmp.mp;
+show partitions dmp.mp_n0;
 
-drop table dmp.mp;
+drop table dmp.mp_n0;
 
 drop database dmp;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_partition_with_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_partition_with_stats.q b/ql/src/test/queries/clientpositive/drop_partition_with_stats.q
index 2211840..48a1f68 100644
--- a/ql/src/test/queries/clientpositive/drop_partition_with_stats.q
+++ b/ql/src/test/queries/clientpositive/drop_partition_with_stats.q
@@ -3,67 +3,67 @@ set hive.mapred.mode=nonstrict;
 -- The column stats for a partitioned table will go to PART_COL_STATS
 CREATE DATABASE IF NOT EXISTS partstatsdb1;
 USE partstatsdb1;
-CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', Part2='P22');
-ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS testtable_n0 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p11', Part2='P12');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p21', Part2='P22');
+ANALYZE TABLE testtable_n0 COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE testtable_n0 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
 
 
-CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P12');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32');
-ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TestTable1_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P11');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p21', Part2='P22');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p31', Part2='P32');
+ANALYZE TABLE TestTable1_n1 COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
 
-CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22');
-ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TESTTABLE2_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p21', Part2='P22');
+ANALYZE TABLE TESTTABLE2_n1 COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
 
-ALTER TABLE partstatsdb1.testtable DROP PARTITION (part1='p11', Part2='P12');
-ALTER TABLE partstatsdb1.TestTable1 DROP PARTITION (part1='p11', Part2='P12');
-ALTER TABLE partstatsdb1.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12');
+ALTER TABLE partstatsdb1.testtable_n0 DROP PARTITION (part1='p11', Part2='P12');
+ALTER TABLE partstatsdb1.TestTable1_n1 DROP PARTITION (part1='p11', Part2='P12');
+ALTER TABLE partstatsdb1.TESTTABLE2_n1 DROP PARTITION (part1='p11', Part2='P12');
 
-DROP TABLE partstatsdb1.testtable;
-DROP TABLE partstatsdb1.TestTable1;
-DROP TABLE partstatsdb1.TESTTABLE2;
+DROP TABLE partstatsdb1.testtable_n0;
+DROP TABLE partstatsdb1.TestTable1_n1;
+DROP TABLE partstatsdb1.TESTTABLE2_n1;
 DROP DATABASE partstatsdb1;
 
 CREATE DATABASE IF NOT EXISTS PARTSTATSDB2;
 USE PARTSTATSDB2;
-CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', Part2='P22');
-ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS testtable_n0 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p11', Part2='P12');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable_n0 PARTITION (part1='p21', Part2='P22');
+ANALYZE TABLE testtable_n0 COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE testtable_n0 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
 
 
-CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P12');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32');
-ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TestTable1_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P11');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p21', Part2='P22');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n1 PARTITION (part1='p31', Part2='P32');
+ANALYZE TABLE TestTable1_n1 COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE TestTable1_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
 
-CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12');
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22');
-ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key;
-ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TESTTABLE2_n1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n1 PARTITION (part1='p21', Part2='P22');
+ANALYZE TABLE TESTTABLE2_n1 COMPUTE STATISTICS FOR COLUMNS key;
+ANALYZE TABLE TESTTABLE2_n1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key;
 
-ALTER TABLE PARTSTATSDB2.testtable DROP PARTITION (part1='p11', Part2='P12');
-ALTER TABLE PARTSTATSDB2.TestTable1 DROP PARTITION (part1='p11', Part2='P12');
-ALTER TABLE PARTSTATSDB2.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12');
+ALTER TABLE PARTSTATSDB2.testtable_n0 DROP PARTITION (part1='p11', Part2='P12');
+ALTER TABLE PARTSTATSDB2.TestTable1_n1 DROP PARTITION (part1='p11', Part2='P12');
+ALTER TABLE PARTSTATSDB2.TESTTABLE2_n1 DROP PARTITION (part1='p11', Part2='P12');
 
-DROP TABLE PARTSTATSDB2.testtable;
-DROP TABLE PARTSTATSDB2.TestTable1;
-DROP TABLE PARTSTATSDB2.TESTTABLE2;
+DROP TABLE PARTSTATSDB2.testtable_n0;
+DROP TABLE PARTSTATSDB2.TestTable1_n1;
+DROP TABLE PARTSTATSDB2.TESTTABLE2_n1;
 DROP DATABASE PARTSTATSDB2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_partitions_filter.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_partitions_filter.q b/ql/src/test/queries/clientpositive/drop_partitions_filter.q
index 04fbcae..5862753 100644
--- a/ql/src/test/queries/clientpositive/drop_partitions_filter.q
+++ b/ql/src/test/queries/clientpositive/drop_partitions_filter.q
@@ -1,37 +1,37 @@
-create table ptestfilter (a string, b int) partitioned by (c string, d string);
-describe ptestfilter;
-
-alter table ptestfilter add partition (c='US', d=1);
-alter table ptestfilter add partition (c='US', d=2);
-alter table ptestFilter add partition (c='Uganda', d=2);
-alter table ptestfilter add partition (c='Germany', d=2);
-alter table ptestfilter add partition (c='Canada', d=3);
-alter table ptestfilter add partition (c='Russia', d=3);
-alter table ptestfilter add partition (c='Greece', d=2);
-alter table ptestfilter add partition (c='India', d=3);
-alter table ptestfilter add partition (c='France', d=4);
-show partitions ptestfilter;
-
-alter table ptestfilter drop partition (c='US', d<'2');
-show partitions ptestfilter;
-
-alter table ptestfilter drop partition (c>='US', d<='2');
-show partitions ptestfilter;
-
-alter table ptestfilter drop partition (c >'India');
-show partitions ptestfilter;
-
-alter table ptestfilter drop partition (c >='India'),
+create table ptestfilter_n1 (a string, b int) partitioned by (c string, d string);
+describe ptestfilter_n1;
+
+alter table ptestfilter_n1 add partition (c='US', d=1);
+alter table ptestfilter_n1 add partition (c='US', d=2);
+alter table ptestFilter_n1 add partition (c='Uganda', d=2);
+alter table ptestfilter_n1 add partition (c='Germany', d=2);
+alter table ptestfilter_n1 add partition (c='Canada', d=3);
+alter table ptestfilter_n1 add partition (c='Russia', d=3);
+alter table ptestfilter_n1 add partition (c='Greece', d=2);
+alter table ptestfilter_n1 add partition (c='India', d=3);
+alter table ptestfilter_n1 add partition (c='France', d=4);
+show partitions ptestfilter_n1;
+
+alter table ptestfilter_n1 drop partition (c='US', d<'2');
+show partitions ptestfilter_n1;
+
+alter table ptestfilter_n1 drop partition (c>='US', d<='2');
+show partitions ptestfilter_n1;
+
+alter table ptestfilter_n1 drop partition (c >'India');
+show partitions ptestfilter_n1;
+
+alter table ptestfilter_n1 drop partition (c >='India'),
                              partition (c='Greece', d='2');
-show partitions ptestfilter;
+show partitions ptestfilter_n1;
 
-alter table ptestfilter drop partition (c != 'France');
-show partitions ptestfilter;
+alter table ptestfilter_n1 drop partition (c != 'France');
+show partitions ptestfilter_n1;
 
 set hive.exec.drop.ignorenonexistent=false;
-alter table ptestfilter drop if exists partition (c='US');
-show partitions ptestfilter;
+alter table ptestfilter_n1 drop if exists partition (c='US');
+show partitions ptestfilter_n1;
 
-drop table ptestfilter;
+drop table ptestfilter_n1;
 
 
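This file exercises Hive's filter-style partition drop, where the PARTITION spec can use comparison operators over partition columns instead of exact values. A minimal sketch of the syntax, with hypothetical table and partition names (not part of this commit):

  create table sales (a string) partitioned by (c string);
  alter table sales add partition (c='India');
  alter table sales add partition (c='US');

  -- drops every partition whose value compares >= 'India' as a string
  alter table sales drop partition (c >= 'India');

  -- with hive.exec.drop.ignorenonexistent=false, IF EXISTS keeps a
  -- non-matching drop from failing the script
  alter table sales drop if exists partition (c='US');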

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_partitions_filter2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_partitions_filter2.q b/ql/src/test/queries/clientpositive/drop_partitions_filter2.q
index 54e6a35..9b7bd7a 100644
--- a/ql/src/test/queries/clientpositive/drop_partitions_filter2.q
+++ b/ql/src/test/queries/clientpositive/drop_partitions_filter2.q
@@ -1,23 +1,23 @@
-create table ptestfilter (a string, b int) partitioned by (c int, d int);
-describe ptestfilter;
+create table ptestfilter_n0 (a string, b int) partitioned by (c int, d int);
+describe ptestfilter_n0;
 
-alter table ptestfilter add partition (c=1, d=1);
-alter table ptestfilter add partition (c=1, d=2);
-alter table ptestFilter add partition (c=2, d=1);
-alter table ptestfilter add partition (c=2, d=2);
-alter table ptestfilter add partition (c=3, d=1);
-alter table ptestfilter add partition (c=30, d=2);
-show partitions ptestfilter;
+alter table ptestfilter_n0 add partition (c=1, d=1);
+alter table ptestfilter_n0 add partition (c=1, d=2);
+alter table ptestFilter_n0 add partition (c=2, d=1);
+alter table ptestfilter_n0 add partition (c=2, d=2);
+alter table ptestfilter_n0 add partition (c=3, d=1);
+alter table ptestfilter_n0 add partition (c=30, d=2);
+show partitions ptestfilter_n0;
 
-alter table ptestfilter drop partition (c=1, d=1);
-show partitions ptestfilter;
+alter table ptestfilter_n0 drop partition (c=1, d=1);
+show partitions ptestfilter_n0;
 
-alter table ptestfilter drop partition (c=2);
-show partitions ptestfilter;
+alter table ptestfilter_n0 drop partition (c=2);
+show partitions ptestfilter_n0;
 
-alter table ptestfilter drop partition (c<4);
-show partitions ptestfilter;
+alter table ptestfilter_n0 drop partition (c<4);
+show partitions ptestfilter_n0;
 
-drop table ptestfilter;
+drop table ptestfilter_n0;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_partitions_filter3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_partitions_filter3.q b/ql/src/test/queries/clientpositive/drop_partitions_filter3.q
index 2d41da4..55485a9 100644
--- a/ql/src/test/queries/clientpositive/drop_partitions_filter3.q
+++ b/ql/src/test/queries/clientpositive/drop_partitions_filter3.q
@@ -1,20 +1,20 @@
-create table ptestfilter (a string, b int) partitioned by (c string, d int);
-describe ptestfilter;
+create table ptestfilter_n3 (a string, b int) partitioned by (c string, d int);
+describe ptestfilter_n3;
 
-alter table ptestfilter add partition (c='1', d=1);
-alter table ptestfilter add partition (c='1', d=2);
-alter table ptestFilter add partition (c='2', d=1);
-alter table ptestfilter add partition (c='2', d=2);
-alter table ptestfilter add partition (c='3', d=1);
-alter table ptestfilter add partition (c='3', d=2);
-show partitions ptestfilter;
+alter table ptestfilter_n3 add partition (c='1', d=1);
+alter table ptestfilter_n3 add partition (c='1', d=2);
+alter table ptestFilter_n3 add partition (c='2', d=1);
+alter table ptestfilter_n3 add partition (c='2', d=2);
+alter table ptestfilter_n3 add partition (c='3', d=1);
+alter table ptestfilter_n3 add partition (c='3', d=2);
+show partitions ptestfilter_n3;
 
-alter table ptestfilter drop partition (c='1', d=1);
-show partitions ptestfilter;
+alter table ptestfilter_n3 drop partition (c='1', d=1);
+show partitions ptestfilter_n3;
 
-alter table ptestfilter drop partition (c='2');
-show partitions ptestfilter;
+alter table ptestfilter_n3 drop partition (c='2');
+show partitions ptestfilter_n3;
 
-drop table ptestfilter;
+drop table ptestfilter_n3;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_partitions_filter4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_partitions_filter4.q b/ql/src/test/queries/clientpositive/drop_partitions_filter4.q
index ee6d46d..1b065ca 100644
--- a/ql/src/test/queries/clientpositive/drop_partitions_filter4.q
+++ b/ql/src/test/queries/clientpositive/drop_partitions_filter4.q
@@ -1,39 +1,39 @@
 SET hive.exec.dynamic.partition.mode=nonstrict;
 
-create table ptestfilter (a string, b int) partitioned by (c double);
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null;
-alter table ptestfilter add partition (c=3.4);
-alter table ptestfilter add partition (c=5.55);
-show partitions ptestfilter;
-
-alter table ptestfilter drop partition(c = '__HIVE_DEFAULT_PARTITION__');
-alter table ptestfilter drop partition(c = 3.40);
-show partitions ptestfilter;
-
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c) select 'Col1', 1, null;
-alter table ptestfilter drop partition(c != '__HIVE_DEFAULT_PARTITION__');
-show partitions ptestfilter;
-
-drop table ptestfilter;
-
-create table ptestfilter (a string, b int) partitioned by (c string, d int);
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col1', 1, null, null;
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2;
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null;
-alter table ptestfilter add partition (c='Germany', d=2);
-show partitions ptestfilter;
-
-alter table ptestfilter drop partition (c='__HIVE_DEFAULT_PARTITION__');
-alter table ptestfilter drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__');
-alter table ptestfilter drop partition (c='Germany', d=2);
-show partitions ptestfilter;
-
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 2;
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col2', 2, null, 3;
-INSERT OVERWRITE TABLE ptestfilter PARTITION (c,d) select 'Col3', 3, 'Uganda', null;
-alter table ptestfilter drop partition (d != 3);
-show partitions ptestfilter;
-
-drop table ptestfilter;
+create table ptestfilter_n2 (a string, b int) partitioned by (c double);
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null;
+alter table ptestfilter_n2 add partition (c=3.4);
+alter table ptestfilter_n2 add partition (c=5.55);
+show partitions ptestfilter_n2;
+
+alter table ptestfilter_n2 drop partition(c = '__HIVE_DEFAULT_PARTITION__');
+alter table ptestfilter_n2 drop partition(c = 3.40);
+show partitions ptestfilter_n2;
+
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c) select 'Col1', 1, null;
+alter table ptestfilter_n2 drop partition(c != '__HIVE_DEFAULT_PARTITION__');
+show partitions ptestfilter_n2;
+
+drop table ptestfilter_n2;
+
+create table ptestfilter_n2 (a string, b int) partitioned by (c string, d int);
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col1', 1, null, null;
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2;
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null;
+alter table ptestfilter_n2 add partition (c='Germany', d=2);
+show partitions ptestfilter_n2;
+
+alter table ptestfilter_n2 drop partition (c='__HIVE_DEFAULT_PARTITION__');
+alter table ptestfilter_n2 drop partition (c='Uganda', d='__HIVE_DEFAULT_PARTITION__');
+alter table ptestfilter_n2 drop partition (c='Germany', d=2);
+show partitions ptestfilter_n2;
+
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 2;
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col2', 2, null, 3;
+INSERT OVERWRITE TABLE ptestfilter_n2 PARTITION (c,d) select 'Col3', 3, 'Uganda', null;
+alter table ptestfilter_n2 drop partition (d != 3);
+show partitions ptestfilter_n2;
+
+drop table ptestfilter_n2;
 
 
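The `__HIVE_DEFAULT_PARTITION__` drops above target the partition that dynamic partitioning creates for NULL partition-column values; the literal name comes from hive.exec.default.partition.name. A minimal sketch with a hypothetical table:

  set hive.exec.dynamic.partition.mode=nonstrict;
  create table events (a string) partitioned by (c string);
  -- a NULL partition value lands in c=__HIVE_DEFAULT_PARTITION__
  insert overwrite table events partition (c) select 'row1', null;
  alter table events drop partition (c = '__HIVE_DEFAULT_PARTITION__');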

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_table2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_table2.q b/ql/src/test/queries/clientpositive/drop_table2.q
index a3e8c5c..68cc28e 100644
--- a/ql/src/test/queries/clientpositive/drop_table2.q
+++ b/ql/src/test/queries/clientpositive/drop_table2.q
@@ -1,15 +1,15 @@
 SET hive.metastore.batch.retrieve.max=1;
-create table if not exists temp(col STRING) partitioned by (p STRING);
-alter table temp add if not exists partition (p ='p1');
-alter table temp add if not exists partition (p ='p2');
-alter table temp add if not exists partition (p ='p3');
+create table if not exists temp_n0(col STRING) partitioned by (p STRING);
+alter table temp_n0 add if not exists partition (p ='p1');
+alter table temp_n0 add if not exists partition (p ='p2');
+alter table temp_n0 add if not exists partition (p ='p3');
 
-show partitions temp;
+show partitions temp_n0;
 
-drop table temp;
+drop table temp_n0;
 
-create table if not exists temp(col STRING) partitioned by (p STRING);
+create table if not exists temp_n0(col STRING) partitioned by (p STRING);
 
-show partitions temp;
+show partitions temp_n0;
 
-drop table temp;
+drop table temp_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_table_purge.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_table_purge.q b/ql/src/test/queries/clientpositive/drop_table_purge.q
index f094a5b..47c5310 100644
--- a/ql/src/test/queries/clientpositive/drop_table_purge.q
+++ b/ql/src/test/queries/clientpositive/drop_table_purge.q
@@ -1,4 +1,4 @@
 SET hive.metastore.batch.retrieve.max=1;
-CREATE TABLE IF NOT EXISTS temp(col STRING);
+CREATE TABLE IF NOT EXISTS temp_n1(col STRING);
 
-DROP TABLE temp PURGE;
+DROP TABLE temp_n1 PURGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q b/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q
index 1ba3f85..ff40340 100644
--- a/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q
+++ b/ql/src/test/queries/clientpositive/drop_table_removes_partition_dirs.q
@@ -3,20 +3,20 @@
 -- This test verifies that if a partition exists outside the table's current location, the
 -- partition's location is dropped as well when the table is dropped.
 
-CREATE TABLE test_table (key STRING, value STRING)
+CREATE TABLE test_table_n3 (key STRING, value STRING)
 PARTITIONED BY (part STRING)
 STORED AS RCFILE
 LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table';
 
-ALTER TABLE test_table ADD PARTITION (part = '1')
+ALTER TABLE test_table_n3 ADD PARTITION (part = '1')
 LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2/part=1';
 
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n3 PARTITION (part = '1')
 SELECT * FROM src;
 
 dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
 
-DROP TABLE test_table;
+DROP TABLE test_table_n3;
 
 dfs -ls ${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2;
 
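If the cleanup behaves as the header comment describes, the first listing above should show part=1 under drop_table_removes_partition_dirs_table2 and the second should show it gone: dropping a managed table removes partition data even when a partition's LOCATION lies outside the table's own directory. A compressed sketch of the pattern, with hypothetical paths:

  CREATE TABLE t (key STRING) PARTITIONED BY (part STRING)
  LOCATION 'file:/tmp/t';
  ALTER TABLE t ADD PARTITION (part='1') LOCATION 'file:/tmp/elsewhere/part=1';
  DROP TABLE t;  -- file:/tmp/elsewhere/part=1 is deleted too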

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/drop_table_with_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_table_with_stats.q b/ql/src/test/queries/clientpositive/drop_table_with_stats.q
index b655b53..ccf3d57 100644
--- a/ql/src/test/queries/clientpositive/drop_table_with_stats.q
+++ b/ql/src/test/queries/clientpositive/drop_table_with_stats.q
@@ -6,17 +6,17 @@ CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING);
 LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable;
 ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key;
 
-CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1;
-ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TestTable1_n0 (key STRING, value STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n0;
+ANALYZE TABLE TestTable1_n0 COMPUTE STATISTICS FOR COLUMNS key;
 
-CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2;
-ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TESTTABLE2_n0 (key STRING, value STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n0;
+ANALYZE TABLE TESTTABLE2_n0 COMPUTE STATISTICS FOR COLUMNS key;
 
 DROP TABLE tblstatsdb1.testtable;
-DROP TABLE tblstatsdb1.TestTable1;
-DROP TABLE tblstatsdb1.TESTTABLE2;
+DROP TABLE tblstatsdb1.TestTable1_n0;
+DROP TABLE tblstatsdb1.TESTTABLE2_n0;
 DROP DATABASE tblstatsdb1;
 
 CREATE DATABASE IF NOT EXISTS TBLSTATSDB2;
@@ -26,18 +26,18 @@ LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable
 ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key;
 
 
-CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1;
-ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TestTable1_n0 (key STRING, value STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1_n0;
+ANALYZE TABLE TestTable1_n0 COMPUTE STATISTICS FOR COLUMNS key;
 
 
-CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING);
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2;
-ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key;
+CREATE TABLE IF NOT EXISTS TESTTABLE2_n0 (key STRING, value STRING);
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2_n0;
+ANALYZE TABLE TESTTABLE2_n0 COMPUTE STATISTICS FOR COLUMNS key;
 
 
 DROP TABLE TBLSTATSDB2.testtable;
-DROP TABLE TBLSTATSDB2.TestTable1;
-DROP TABLE TBLSTATSDB2.TESTTABLE2;
+DROP TABLE TBLSTATSDB2.TestTable1_n0;
+DROP TABLE TBLSTATSDB2.TESTTABLE2_n0;
 DROP DATABASE TBLSTATSDB2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/druid_basic2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_basic2.q b/ql/src/test/queries/clientpositive/druid_basic2.q
index 3c17bc5..f1d215a 100644
--- a/ql/src/test/queries/clientpositive/druid_basic2.q
+++ b/ql/src/test/queries/clientpositive/druid_basic2.q
@@ -1,28 +1,28 @@
 set hive.strict.checks.cartesian.product=false;
 set hive.druid.broker.address.default=localhost.test;
 
-CREATE EXTERNAL TABLE druid_table_1
+CREATE EXTERNAL TABLE druid_table_1_n2
 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
 TBLPROPERTIES ("druid.datasource" = "wikipedia");
 
-DESCRIBE FORMATTED druid_table_1;
+DESCRIBE FORMATTED druid_table_1_n2;
 
 -- dimension
 EXPLAIN EXTENDED
-SELECT robot FROM druid_table_1;
+SELECT robot FROM druid_table_1_n2;
 
 -- metric
 EXPLAIN EXTENDED
-SELECT delta FROM druid_table_1;
+SELECT delta FROM druid_table_1_n2;
 
 EXPLAIN EXTENDED
 SELECT robot
-FROM druid_table_1
+FROM druid_table_1_n2
 WHERE language = 'en';
 
 EXPLAIN EXTENDED
 SELECT DISTINCT robot
-FROM druid_table_1
+FROM druid_table_1_n2
 WHERE language = 'en';
 
 -- TODO: currently nothing is pushed - ISNOTNULL
@@ -31,10 +31,10 @@ SELECT a.robot, b.language
 FROM
 (
   (SELECT robot, language
-  FROM druid_table_1) a
+  FROM druid_table_1_n2) a
   JOIN
   (SELECT language
-  FROM druid_table_1) b
+  FROM druid_table_1_n2) b
   ON a.language = b.language
 );
 
@@ -43,28 +43,28 @@ SELECT a.robot, b.language
 FROM
 (
   (SELECT robot, language
-  FROM druid_table_1
+  FROM druid_table_1_n2
   WHERE language = 'en') a
   JOIN
   (SELECT language
-  FROM druid_table_1) b
+  FROM druid_table_1_n2) b
   ON a.language = b.language
 );
 
 EXPLAIN EXTENDED
 SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
-FROM druid_table_1
+FROM druid_table_1_n2
 GROUP BY robot, language, floor_day(`__time`)
 ORDER BY CAST(robot AS INTEGER) ASC, m DESC
 LIMIT 10;
 
 EXPLAIN
 SELECT substring(namespace, CAST(deleted AS INT), 4)
-FROM druid_table_1;
+FROM druid_table_1_n2;
 
 EXPLAIN
 SELECT robot, floor_day(`__time`)
-FROM druid_table_1
+FROM druid_table_1_n2
 WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
 GROUP BY robot, floor_day(`__time`)
 ORDER BY robot
@@ -72,7 +72,7 @@ LIMIT 10;
 
 EXPLAIN
 SELECT robot, `__time`
-FROM druid_table_1
+FROM druid_table_1_n2
 WHERE floor_day(`__time`) BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
 GROUP BY robot, `__time`
 ORDER BY robot
@@ -80,7 +80,7 @@ LIMIT 10;
 
 EXPLAIN
 SELECT robot, floor_day(`__time`)
-FROM druid_table_1
+FROM druid_table_1_n2
 WHERE `__time` BETWEEN '1999-11-01 00:00:00' AND '1999-11-10 00:00:00'
 GROUP BY robot, floor_day(`__time`)
 ORDER BY robot
@@ -90,7 +90,7 @@ LIMIT 10;
 set hive.cbo.enable=false;
 EXPLAIN EXTENDED
 SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s
-FROM druid_table_1
+FROM druid_table_1_n2
 GROUP BY robot, language, floor_day(`__time`)
 ORDER BY CAST(robot AS INTEGER) ASC, m DESC
 LIMIT 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/druid_basic3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_basic3.q b/ql/src/test/queries/clientpositive/druid_basic3.q
index 624beeb..f43b636 100644
--- a/ql/src/test/queries/clientpositive/druid_basic3.q
+++ b/ql/src/test/queries/clientpositive/druid_basic3.q
@@ -1,43 +1,43 @@
 set hive.strict.checks.cartesian.product=false;
 set hive.druid.broker.address.default=localhost.test;
 
-CREATE EXTERNAL TABLE druid_table_1
+CREATE EXTERNAL TABLE druid_table_1_n4
 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
 TBLPROPERTIES ("druid.datasource" = "wikipedia");
 
 EXPLAIN
 SELECT sum(added) + sum(delta) as a, language
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
 
 EXPLAIN
 SELECT sum(delta), sum(added) + sum(delta) AS a, language
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
 
 EXPLAIN
 SELECT language, sum(added) / sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
         
 EXPLAIN
 SELECT language, sum(added) * sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
 
 EXPLAIN
 SELECT language, sum(added) - sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
         
 EXPLAIN
 SELECT language, sum(added) + 100 AS a
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
 
@@ -45,26 +45,26 @@ EXPLAIN
 SELECT language, -1 * (a + b) AS c
 FROM (
   SELECT (sum(added)-sum(delta)) / (count(*) * 3) AS a, sum(deleted) AS b, language
-  FROM druid_table_1
+  FROM druid_table_1_n4
   GROUP BY language) subq
 ORDER BY c DESC;
 
 EXPLAIN
 SELECT language, robot, sum(added) - sum(delta) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
 WHERE extract (week from `__time`) IN (10,11)
 GROUP BY language, robot;
 
 EXPLAIN
 SELECT language, sum(delta) / count(*) AS a
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
 
 EXPLAIN
 SELECT language, sum(added) / sum(delta) AS a,
        CASE WHEN sum(deleted)=0 THEN 1.0 ELSE sum(deleted) END AS b
-FROM druid_table_1
+FROM druid_table_1_n4
 GROUP BY language
 ORDER BY a DESC;
 
@@ -72,7 +72,7 @@ EXPLAIN
 SELECT language, a, a - b as c
 FROM (
   SELECT language, sum(added) + 100 AS a, sum(delta) AS b
-  FROM druid_table_1
+  FROM druid_table_1_n4
   GROUP BY language) subq
 ORDER BY a DESC;
 
@@ -80,7 +80,7 @@ EXPLAIN
 SELECT language, robot, "A"
 FROM (
   SELECT sum(added) - sum(delta) AS a, language, robot
-  FROM druid_table_1
+  FROM druid_table_1_n4
   GROUP BY language, robot ) subq
 ORDER BY "A"
 LIMIT 5;
@@ -89,7 +89,7 @@ EXPLAIN
 SELECT language, robot, "A"
 FROM (
   SELECT language, sum(added) + sum(delta) AS a, robot
-  FROM druid_table_1
+  FROM druid_table_1_n4
   GROUP BY language, robot) subq
 ORDER BY robot, language
 LIMIT 5;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/druid_intervals.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_intervals.q b/ql/src/test/queries/clientpositive/druid_intervals.q
index 140ff82..a7ee052 100644
--- a/ql/src/test/queries/clientpositive/druid_intervals.q
+++ b/ql/src/test/queries/clientpositive/druid_intervals.q
@@ -1,67 +1,67 @@
 set hive.druid.broker.address.default=localhost.test;
 
-CREATE EXTERNAL TABLE druid_table_1
+CREATE EXTERNAL TABLE druid_table_1_n0
 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
 TBLPROPERTIES ("druid.datasource" = "wikipedia");
 
-DESCRIBE FORMATTED druid_table_1;
+DESCRIBE FORMATTED druid_table_1_n0;
 
 -- (-∞‥+∞)
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1;
+FROM druid_table_1_n0;
 
 -- (-∞‥2012-03-01 00:00:00)
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE `__time` < '2012-03-01 00:00:00';
 
 -- [2010-01-01 00:00:00‥2012-03-01 00:00:00)
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00';
 
 -- [2010-01-01 00:00:00‥2011-01-01 00:00:00)
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00'
     AND `__time` < '2011-01-01 00:00:00';
 
 -- [2010-01-01 00:00:00‥2011-01-01 00:00:00]
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00';
 
 -- [2010-01-01 00:00:00‥2011-01-01 00:00:00],[2012-01-01 00:00:00‥2013-01-01 00:00:00]
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
     OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00');
 
 -- OVERLAP [2010-01-01 00:00:00‥2012-01-01 00:00:00]
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00')
     OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00');
 
 -- IN: MULTIPLE INTERVALS [2010-01-01 00:00:00‥2010-01-01 00:00:00),[2011-01-01 00:00:00‥2011-01-01 00:00:00)
 EXPLAIN
 SELECT `__time`
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00');
 
 EXPLAIN
 SELECT `__time`, robot
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00');
 
 EXPLAIN
 SELECT `__time`, robot
-FROM druid_table_1
+FROM druid_table_1_n0
 WHERE robot = 'user1' OR `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00');

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/druid_timeseries.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/druid_timeseries.q b/ql/src/test/queries/clientpositive/druid_timeseries.q
index a330ade..6ff7d59 100644
--- a/ql/src/test/queries/clientpositive/druid_timeseries.q
+++ b/ql/src/test/queries/clientpositive/druid_timeseries.q
@@ -1,30 +1,30 @@
 set hive.druid.broker.address.default=localhost.test;
 
-CREATE EXTERNAL TABLE druid_table_1
+CREATE EXTERNAL TABLE druid_table_1_n3
 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
 TBLPROPERTIES ("druid.datasource" = "wikipedia");
 
--- DESCRIBE FORMATTED druid_table_1;
+-- DESCRIBE FORMATTED druid_table_1_n3;
 
 -- GRANULARITY: ALL
-EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00 UTC' AND  `__time` <= '2012-03-01 00:00:00 UTC' OR  added <= 0;
+EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00 UTC' AND  `__time` <= '2012-03-01 00:00:00 UTC' OR  added <= 0;
 
-EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00 UTC';
+EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC';
 
 EXPLAIN
 SELECT max(added), sum(variation)
-FROM druid_table_1;
+FROM druid_table_1_n3;
 
 -- GRANULARITY: NONE
 EXPLAIN
 SELECT `__time`, max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY `__time`;
 
 -- GRANULARITY: YEAR
 EXPLAIN
 SELECT floor_year(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_year(`__time`);
 
 -- @TODO FIXME https://issues.apache.org/jira/browse/CALCITE-2222
@@ -32,56 +32,56 @@ GROUP BY floor_year(`__time`);
 -- GRANULARITY: QUARTER
 EXPLAIN
 SELECT floor_quarter(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_quarter(`__time`);
 
 -- GRANULARITY: MONTH
 EXPLAIN
 SELECT floor_month(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_month(`__time`);
 
 -- GRANULARITY: WEEK
 EXPLAIN
 SELECT floor_week(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_week(`__time`);
 
 -- GRANULARITY: DAY
 EXPLAIN
 SELECT floor_day(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_day(`__time`);
 
 -- GRANULARITY: HOUR
 EXPLAIN
 SELECT floor_hour(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_hour(`__time`);
 
 -- GRANULARITY: MINUTE
 EXPLAIN
 SELECT floor_minute(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_minute(`__time`);
 
 -- GRANULARITY: SECOND
 EXPLAIN
 SELECT floor_second(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 GROUP BY floor_second(`__time`);
 
 -- WITH FILTER ON DIMENSION
 EXPLAIN
 SELECT floor_hour(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 WHERE robot='1'
 GROUP BY floor_hour(`__time`);
 
 -- WITH FILTER ON TIME
 EXPLAIN
 SELECT floor_hour(`__time`), max(added), sum(variation)
-FROM druid_table_1
+FROM druid_table_1_n3
 WHERE floor_hour(`__time`)
     BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
         AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
@@ -93,19 +93,19 @@ SELECT subq.h, subq.m, subq.s
 FROM
 (
   SELECT floor_hour(`__time`) as h, max(added) as m, sum(variation) as s
-  FROM druid_table_1
+  FROM druid_table_1_n3
   GROUP BY floor_hour(`__time`)
 ) subq
 WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE)
         AND CAST('2014-01-01 00:00:00' AS TIMESTAMP WITH LOCAL TIME ZONE);
 
 -- Simplification of count(__time) as count(*) since time column is not null
-EXPLAIN SELECT count(`__time`) from druid_table_1;
+EXPLAIN SELECT count(`__time`) from druid_table_1_n3;
 
 
-EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00 UTC';
+EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00 UTC';
 
-EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` >= '2010-01-01 00:00:00';
+EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` >= '2010-01-01 00:00:00';
 
 
-EXPLAIN SELECT count(`__time`) from druid_table_1 where `__time` <= '2010-01-01 00:00:00' OR  `__time` <= '2012-03-01 00:00:00';
+EXPLAIN SELECT count(`__time`) from druid_table_1_n3 where `__time` <= '2010-01-01 00:00:00' OR  `__time` <= '2012-03-01 00:00:00';
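
As the comment above notes, Druid's `__time` column is never NULL, so count(`__time`) can be rewritten to count(*) and pushed to Druid as a simple count aggregation. A sketch of the equivalence being tested (illustrative; not actual plan output):

  -- these two should compile to the same Druid query
  EXPLAIN SELECT count(`__time`) FROM druid_table_1_n3;
  EXPLAIN SELECT count(*) FROM druid_table_1_n3;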