Posted to commits@hive.apache.org by jc...@apache.org on 2018/05/25 00:58:04 UTC

[32/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)
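
The rename is mechanical: each test-local table gains a per-file numeric suffix so that .q files executed in the same test batch no longer create identically named tables. A minimal illustration of the pattern, with suffixes taken from the diffs below (HiveQL; not itself part of the commit):

    -- before: several .q files in one batch all created the same table
    CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);

    -- after: each file creates its own suffixed copy
    CREATE TABLE test_table_n5  (key STRING, value STRING) PARTITIONED BY (part STRING);  -- infer_bucket_sort.q
    CREATE TABLE test_table_n11 (key STRING, value STRING) PARTITIONED BY (part STRING);  -- infer_bucket_sort_convert_join.q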

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort.q b/ql/src/test/queries/clientpositive/infer_bucket_sort.q
index 3423299..43506b8 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort.q
@@ -4,156 +4,156 @@ set hive.exec.infer.bucket.sort=true;
 -- This tests inferring how data is bucketed/sorted from the operators in the reducer
 -- and populating that information in partitions' metadata
 
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n5 (key STRING, value STRING) PARTITIONED BY (part STRING);
 
 -- Test group by, should be bucketed and sorted by group by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, count(*) FROM src GROUP BY key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by where a key isn't selected, should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, count(*) FROM src GROUP BY key, value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join with two keys, should be bucketed and sorted by join keys
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join with two keys and only one selected, should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, '1' FROM src a JOIN src b ON a.key = b.key AND a.value = b.value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join on three tables on same key, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key);
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join on three tables on different keys, should be bucketed and sorted by latter key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value);
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test distribute by, should only be bucketed by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src DISTRIBUTE BY key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test sort by, should be sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src SORT BY key ASC;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test sort by desc, should be sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src SORT BY key DESC;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test cluster by, should be bucketed and sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src CLUSTER BY key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test distribute by and sort by different keys, should be bucketed by one key sorted by the other
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src DISTRIBUTE BY key SORT BY value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join in simple subquery, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, value from (SELECT a.key, b.value FROM src a JOIN src b ON (a.key = b.key)) subq;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join in simple subquery renaming key column, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT k, value FROM (SELECT a.key as k, b.value FROM src a JOIN src b ON (a.key = b.key)) subq;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in simple subquery, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, cnt from (SELECT key, count(*) as cnt FROM src GROUP BY key) subq;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in simple subquery renaming key column, should be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT k, cnt FROM (SELECT key as k, count(*) as cnt FROM src GROUP BY key) subq;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery with where outside, should still be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, value FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery with expression on value, should still be bucketed and sorted on key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, value + 1 FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery with lateral view outside, should still be bucketed and sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, value FROM (SELECT key FROM src group by key) a lateral view explode(array(1, 2)) value as value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery with another group by outside, should be bucketed and sorted by the
 -- key of the outer group by
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery with select on outside reordering the columns, should be bucketed and
 -- sorted by the column the group by key ends up in
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT value, key FROM (SELECT key, count(1) as value FROM src group by key) a;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery followed by distribute by, should only be bucketed by the distribute key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a distribute by key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery followed by sort by, should only be sorted by the sort key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a sort by key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by in subquery followed by transform script, should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT TRANSFORM (a.key, a.value) USING 'cat' AS (key, value) FROM (SELECT key, count(1) AS value FROM src GROUP BY KEY) a;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by on function, should be bucketed and sorted by key and value because the function is applied in the mapper
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1')
 SELECT key, value FROM (SELECT concat(key, "a") AS key, value, count(*)  FROM src GROUP BY concat(key, "a"), value) a;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
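
Each DESCRIBE FORMATTED above is what the .q test inspects: with hive.exec.infer.bucket.sort=true, the inferred bucketing and sorting are written into the partition's metadata. As a rough sketch of the relevant part of that output (illustrative only; field values vary per query and are not taken from this commit):

    # Detailed Partition Information
    ...
    Num Buckets:            1
    Bucket Columns:         [key]
    Sort Columns:           [Order(col:key, order:1)]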

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
index e417028..2ad5195 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_convert_join.q
@@ -16,13 +16,13 @@ set hive.auto.convert.join=true;
 -- and populating that information in partitions' metadata.  In particular, those cases
 -- where joins may be auto converted to map joins.
 
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n11 (key STRING, value STRING) PARTITIONED BY (part STRING);
 
 -- Tests a join which is converted to a map join, the output should be neither bucketed nor sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1') 
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1');
 
 set hive.mapjoin.check.memory.rows=1;
 set hive.mapjoin.localtask.max.memory.usage = 0.0001;
@@ -31,7 +31,7 @@ set hive.auto.convert.join.noconditionaltask = false;
 -- This test tests the scenario when the mapper dies. So, create a conditional task for the mapjoin.
 -- Tests a join which is not converted to a map join, the output should be bucketed and sorted.
 
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n11 PARTITION (part = '1') 
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n11 PARTITION (part = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
index 5b5bf8e..b84e98c 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
@@ -11,54 +11,54 @@ set hive.merge.mapredfiles=false;
 -- and populating that information in partitions' metadata.  In particular, those cases
 -- where dynamic partitioning is used.
 
-CREATE TABLE test_table LIKE srcpart;
-ALTER TABLE test_table SET FILEFORMAT RCFILE;
+CREATE TABLE test_table_n8 LIKE srcpart;
+ALTER TABLE test_table_n8 SET FILEFORMAT RCFILE;
 
 -- Simple case, this should not be bucketed or sorted
 
-INSERT OVERWRITE TABLE test_table PARTITION (ds, hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds, hr)
 SELECT key, value, ds, hr FROM srcpart
 WHERE ds = '2008-04-08';
 
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');
 
 -- This should not be bucketed or sorted since the partition keys are in the set of bucketed
 -- and sorted columns for the output 
 
-INSERT OVERWRITE TABLE test_table PARTITION (ds, hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds, hr)
 SELECT key, COUNT(*), ds, hr FROM srcpart
 WHERE ds = '2008-04-08'
 GROUP BY key, ds, hr;
 
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');
 
 -- Both partitions should be bucketed and sorted by key
 
-INSERT OVERWRITE TABLE test_table PARTITION (ds, hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds, hr)
 SELECT key, value, '2008-04-08', IF (key % 2 == 0, '11', '12') FROM
 (SELECT key, COUNT(*) AS value FROM srcpart
 WHERE ds = '2008-04-08'
 GROUP BY key) a;
 
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');
 
-CREATE TABLE srcpart_merge_dp LIKE srcpart;
+CREATE TABLE srcpart_merge_dp_n3 LIKE srcpart;
 
-CREATE TABLE srcpart_merge_dp_rc LIKE srcpart;
-ALTER TABLE srcpart_merge_dp_rc SET FILEFORMAT RCFILE;
+CREATE TABLE srcpart_merge_dp_rc_n0 LIKE srcpart;
+ALTER TABLE srcpart_merge_dp_rc_n0 SET FILEFORMAT RCFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=11);
 
-LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=12);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp_n3 PARTITION(ds='2008-04-08', hr=12);
 
-INSERT OVERWRITE TABLE srcpart_merge_dp_rc PARTITION (ds = '2008-04-08', hr) 
-SELECT key, value, hr FROM srcpart_merge_dp WHERE ds = '2008-04-08';
+INSERT OVERWRITE TABLE srcpart_merge_dp_rc_n0 PARTITION (ds = '2008-04-08', hr) 
+SELECT key, value, hr FROM srcpart_merge_dp_n3 WHERE ds = '2008-04-08';
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 set hive.merge.mapfiles=true;
@@ -74,17 +74,17 @@ set mapred.reduce.tasks=2;
 -- definitely not be.
 
 EXPLAIN
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds = '2008-04-08', hr)
 SELECT key, value, IF (key % 100 == 0, '11', '12') FROM
 (SELECT key, COUNT(*) AS value FROM srcpart
 WHERE ds = '2008-04-08'
 GROUP BY key) a;
 
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n8 PARTITION (ds = '2008-04-08', hr)
 SELECT key, value, IF (key % 100 == 0, '11', '12') FROM
 (SELECT key, COUNT(*) AS value FROM srcpart
 WHERE ds = '2008-04-08'
 GROUP BY key) a;
 
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='11');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='11');
+DESCRIBE FORMATTED test_table_n8 PARTITION (ds='2008-04-08', hr='12');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
index 1a778fa..fa95da1 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_map_operators.q
@@ -7,47 +7,47 @@ set hive.exec.infer.bucket.sort=true;
 -- and populating that information in partitions' metadata, in particular, this tests
 -- that operators in the mapper have no effect
 
-CREATE TABLE test_table1 (key STRING, value STRING)
+CREATE TABLE test_table1_n14 (key STRING, value STRING)
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
 
-CREATE TABLE test_table2 (key STRING, value STRING)
+CREATE TABLE test_table2_n13 (key STRING, value STRING)
 CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS;
 
-INSERT OVERWRITE TABLE test_table1 SELECT key, value FROM src;
+INSERT OVERWRITE TABLE test_table1_n14 SELECT key, value FROM src;
 
-INSERT OVERWRITE TABLE test_table2 SELECT key, value FROM src;
+INSERT OVERWRITE TABLE test_table2_n13 SELECT key, value FROM src;
 
-CREATE TABLE test_table_out (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_out_n0 (key STRING, value STRING) PARTITIONED BY (part STRING);
 
 set hive.map.groupby.sorted=true;
 
 -- Test map group by doesn't affect inference, should not be bucketed or sorted
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
-SELECT key, count(*) FROM test_table1 GROUP BY key;
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1') 
+SELECT key, count(*) FROM test_table1_n14 GROUP BY key;
 
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
-SELECT key, count(*) FROM test_table1 GROUP BY key;
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1') 
+SELECT key, count(*) FROM test_table1_n14 GROUP BY key;
 
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
 
 -- Test map group by doesn't affect inference, should be bucketed and sorted by value
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1') 
 SELECT a.key, a.value FROM (
-	SELECT key, count(*) AS value FROM test_table1 GROUP BY key
+	SELECT key, count(*) AS value FROM test_table1_n14 GROUP BY key
 ) a JOIN (
  	SELECT key, value FROM src
 ) b
 ON (a.value = b.value);
 
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1') 
 SELECT a.key, a.value FROM (
-	SELECT key, cast(count(*) AS STRING) AS value FROM test_table1 GROUP BY key
+	SELECT key, cast(count(*) AS STRING) AS value FROM test_table1_n14 GROUP BY key
 ) a JOIN (
  	SELECT key, value FROM src
 ) b
 ON (a.value = b.value);
 
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
 
 set hive.map.groupby.sorted=false;
 set hive.optimize.bucketmapjoin = true;
@@ -55,22 +55,22 @@ set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.cbo.enable=false;
 
 -- Test SMB join doesn't affect inference, should not be bucketed or sorted
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key;
 
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key;
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
 
 -- Test SMB join doesn't affect inference, should be bucketed and sorted by key
-EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
 GROUP BY b.value;
 
-INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1')
-SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
+SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
 GROUP BY b.value;
 
-DESCRIBE FORMATTED test_table_out PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
index f8cde4b..fa12ca3 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_merge.q
@@ -8,19 +8,19 @@ set mapred.reduce.tasks=2;
 -- and populating that information in partitions' metadata.  In particular, those cases
 -- where where merging may or may not be used.
 
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n16 (key STRING, value STRING) PARTITIONED BY (part STRING);
 
 -- Tests a reduce task followed by a merge.  The output should be neither bucketed nor sorted.
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n16 PARTITION (part = '1') 
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n16 PARTITION (part = '1');
 
 set hive.merge.smallfiles.avgsize=2;
 set hive.exec.compress.output=false;
 
 -- Tests a reduce task followed by a move. The output should be bucketed and sorted.
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n16 PARTITION (part = '1') 
 SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n16 PARTITION (part = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
index 2255bdb..49175ac 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_num_buckets.q
@@ -3,7 +3,7 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set mapred.reduce.tasks=2;
 
-CREATE TABLE test_table (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING);
+CREATE TABLE test_table_n0 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING);
 
 -- Tests dynamic partitions where bucketing/sorting can be inferred, but not all reducers write
 -- all partitions.  The subquery produces rows as follows
@@ -19,19 +19,19 @@ CREATE TABLE test_table (key INT, value STRING) PARTITIONED BY (ds STRING, hr ST
 -- and hr=1 should not.
 
 EXPLAIN
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr)
 SELECT key2, value, cast(hr as int) FROM
 (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr
 FROM srcpart
 WHERE ds = '2008-04-08') a
 DISTRIBUTE BY key2;
 
-INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr)
+INSERT OVERWRITE TABLE test_table_n0 PARTITION (ds = '2008-04-08', hr)
 SELECT key2, value, cast(hr as int) FROM
 (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 3 % 2) as hr
 FROM srcpart
 WHERE ds = '2008-04-08') a
 DISTRIBUTE BY key2;
 
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='0');
-DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='1');
+DESCRIBE FORMATTED test_table_n0 PARTITION (ds='2008-04-08', hr='0');
+DESCRIBE FORMATTED test_table_n0 PARTITION (ds='2008-04-08', hr='1');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q b/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q
index c0ddb8b..a9cbb96 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort_reducers_power_two.q
@@ -8,41 +8,41 @@ set hive.exec.reducers.bytes.per.reducer=2500;
 -- and populating that information in partitions' metadata, it also verifies that the
 -- number of reducers chosen will be a power of two
 
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING);
+CREATE TABLE test_table_n14 (key STRING, value STRING) PARTITIONED BY (part STRING);
 
 -- Test group by, should be bucketed and sorted by group by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1') 
 SELECT key, count(*) FROM src GROUP BY key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 
 -- Test join, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1') 
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 
 -- Test join with two keys, should be bucketed and sorted by join keys
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1') 
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 
 -- Test join on three tables on same key, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1') 
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key);
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 
 -- Test join on three tables on different keys, should be bucketed and sorted by latter key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1') 
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value);
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');
 
 -- Test group by in subquery with another group by outside, should be bucketed and sorted by the
 -- key of the outer group by
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
+INSERT OVERWRITE TABLE test_table_n14 PARTITION (part = '1')
 SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n14 PARTITION (part = '1');

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_join_preds.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/infer_join_preds.q b/ql/src/test/queries/clientpositive/infer_join_preds.q
index a9f5468..8e8c9f2 100644
--- a/ql/src/test/queries/clientpositive/infer_join_preds.q
+++ b/ql/src/test/queries/clientpositive/infer_join_preds.q
@@ -63,7 +63,7 @@ right outer join
 explain select * from src join src1 on src.key = src1.key and src.value = src1.value
     where 4 between src.key and src.value;
 
-    CREATE TABLE `table1`(
+    CREATE TABLE `table1_n8`(
        `idp_warehouse_id` bigint,
        `idp_audit_id` bigint,
        `idp_effective_date` date,
@@ -89,7 +89,7 @@ explain select * from src join src1 on src.key = src1.key and src.value = src1.v
        `odf_ss_actuals` bigint,
        `practsum` decimal(38,20));
 
-    CREATE TABLE `table2`(
+    CREATE TABLE `table2_n4`(
        `idp_warehouse_id` bigint,
        `idp_audit_id` bigint,
        `idp_effective_date` date,
@@ -116,14 +116,14 @@ explain select * from src join src1 on src.key = src1.key and src.value = src1.v
        `practsum` decimal(38,20));
 
     explain SELECT          s.idp_warehouse_id AS source_warehouse_id
-    FROM            table1 s
+    FROM            table1_n8 s
     JOIN
 
-                           table2 d
+                           table2_n4 d
     ON              (
                                     s.prid = d.prid )
     JOIN
-                             table2 e
+                             table2_n4 e
     ON
                                     s.prid = e.prid
     WHERE
@@ -277,7 +277,7 @@ explain select * from src join src1 on src.key = src1.key and src.value = src1.v
                                     ELSE d.odf_ss_actuals
                     END );
 
-drop table table2;
-drop table table1;
+drop table table2_n4;
+drop table table1_n8;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/innerjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/innerjoin.q b/ql/src/test/queries/clientpositive/innerjoin.q
index dd77ddd..368c83b 100644
--- a/ql/src/test/queries/clientpositive/innerjoin.q
+++ b/ql/src/test/queries/clientpositive/innerjoin.q
@@ -3,26 +3,26 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n20(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value;
 
 FROM src src1 INNER JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n20 SELECT src1.key, src2.value;
 
-SELECT dest_j1.* FROM dest_j1;
+SELECT dest_j1_n20.* FROM dest_j1_n20;
 
 -- verify that INNER is a non-reserved word for backwards compatibility
 -- change from HIVE-6617, inner is a SQL2011 reserved keyword.
-create table `inner`(i int);
+create table `inner`(i_n2 int);
 
-select i from `inner`;
+select i_n2 from `inner`;
 
-create table i(`inner` int);
+create table i_n2(`inner` int);
 
-select `inner` from i;
+select `inner` from i_n2;
 
 explain select * from (select * from src) `inner` left outer join src
 on `inner`.key=src.key;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/inoutdriver.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/inoutdriver.q b/ql/src/test/queries/clientpositive/inoutdriver.q
index 34b0e0b..b2fca93 100644
--- a/ql/src/test/queries/clientpositive/inoutdriver.q
+++ b/ql/src/test/queries/clientpositive/inoutdriver.q
@@ -1,2 +1,2 @@
-create table test (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver';
-desc extended test;
+create table test_n3 (a int) stored as inputformat 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' outputformat 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'RCFileInDriver' outputdriver 'RCFileOutDriver';
+desc extended test_n3;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input1.q b/ql/src/test/queries/clientpositive/input1.q
index 3f2cd96..148f0d2 100644
--- a/ql/src/test/queries/clientpositive/input1.q
+++ b/ql/src/test/queries/clientpositive/input1.q
@@ -1,9 +1,9 @@
-CREATE TABLE TEST1(A INT, B DOUBLE) STORED AS TEXTFILE; 
+CREATE TABLE TEST1_n6(A INT, B DOUBLE) STORED AS TEXTFILE; 
 
 EXPLAIN
-DESCRIBE TEST1; 
+DESCRIBE TEST1_n6; 
 
-DESCRIBE TEST1; 
+DESCRIBE TEST1_n6; 
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input11.q b/ql/src/test/queries/clientpositive/input11.q
index c831357..f132b50 100644
--- a/ql/src/test/queries/clientpositive/input11.q
+++ b/ql/src/test/queries/clientpositive/input11.q
@@ -1,11 +1,11 @@
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n152(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100;
+INSERT OVERWRITE TABLE dest1_n152 SELECT src.key, src.value WHERE src.key < 100;
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100;
+INSERT OVERWRITE TABLE dest1_n152 SELECT src.key, src.value WHERE src.key < 100;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n152.* FROM dest1_n152;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input11_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input11_limit.q b/ql/src/test/queries/clientpositive/input11_limit.q
index 211c37a..f42ff83 100644
--- a/ql/src/test/queries/clientpositive/input11_limit.q
+++ b/ql/src/test/queries/clientpositive/input11_limit.q
@@ -2,13 +2,13 @@ set hive.stats.column.autogather=false;
 set hive.mapred.mode=nonstrict;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n153(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10;
+INSERT OVERWRITE TABLE dest1_n153 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10;
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10;
+INSERT OVERWRITE TABLE dest1_n153 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n153.* FROM dest1_n153;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input12.q b/ql/src/test/queries/clientpositive/input12.q
index b4ec1f1..d93c2ea 100644
--- a/ql/src/test/queries/clientpositive/input12.q
+++ b/ql/src/test/queries/clientpositive/input12.q
@@ -5,21 +5,21 @@ set hive.exec.mode.local.auto=true;
 
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n122(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest2_n32(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest3_n5(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
+INSERT OVERWRITE TABLE dest1_n122 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n32 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n5 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
 
 FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
+INSERT OVERWRITE TABLE dest1_n122 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n32 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n5 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
 
-SELECT dest1.* FROM dest1;
-SELECT dest2.* FROM dest2;
-SELECT dest3.* FROM dest3;
+SELECT dest1_n122.* FROM dest1_n122;
+SELECT dest2_n32.* FROM dest2_n32;
+SELECT dest3_n5.* FROM dest3_n5;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input12_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input12_hadoop20.q b/ql/src/test/queries/clientpositive/input12_hadoop20.q
index 17ee0a8..5044cd6 100644
--- a/ql/src/test/queries/clientpositive/input12_hadoop20.q
+++ b/ql/src/test/queries/clientpositive/input12_hadoop20.q
@@ -3,21 +3,21 @@ set hive.exec.mode.local.auto=true;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n88(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest2_n23(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest3_n2(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
+INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
 
 FROM src 
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
+INSERT OVERWRITE TABLE dest1_n88 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n23 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n2 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200;
 
-SELECT dest1.* FROM dest1;
-SELECT dest2.* FROM dest2;
-SELECT dest3.* FROM dest3;
+SELECT dest1_n88.* FROM dest1_n88;
+SELECT dest2_n23.* FROM dest2_n23;
+SELECT dest3_n2.* FROM dest3_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input13.q b/ql/src/test/queries/clientpositive/input13.q
index 02be927..0726aa0 100644
--- a/ql/src/test/queries/clientpositive/input13.q
+++ b/ql/src/test/queries/clientpositive/input13.q
@@ -1,22 +1,22 @@
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest3(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n77(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest2_n16(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest3_n1(key INT) PARTITIONED BY(ds STRING, hr STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
+INSERT OVERWRITE TABLE dest1_n77 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n16 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
 INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
-INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
+INSERT OVERWRITE TABLE dest1_n77 SELECT src.* WHERE src.key < 100
+INSERT OVERWRITE TABLE dest2_n16 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
+INSERT OVERWRITE TABLE dest3_n1 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
 INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
 
-SELECT dest1.* FROM dest1;
-SELECT dest2.* FROM dest2;
-SELECT dest3.* FROM dest3;
+SELECT dest1_n77.* FROM dest1_n77;
+SELECT dest2_n16.* FROM dest2_n16;
+SELECT dest3_n1.* FROM dest3_n1;
 dfs -cat ${system:test.warehouse.dir}/dest4.out/*;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input14.q b/ql/src/test/queries/clientpositive/input14.q
index e854acf..ffa9a5a 100644
--- a/ql/src/test/queries/clientpositive/input14.q
+++ b/ql/src/test/queries/clientpositive/input14.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest1_n42(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,7 +8,7 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey 
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
+INSERT OVERWRITE TABLE dest1_n42 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
 
 FROM (
   FROM src
@@ -15,8 +16,8 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey 
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
+INSERT OVERWRITE TABLE dest1_n42 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
 
 -- SORT_QUERY_RESULTS
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n42.* FROM dest1_n42;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input14_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input14_limit.q b/ql/src/test/queries/clientpositive/input14_limit.q
index 2f6e4e4..0b10842 100644
--- a/ql/src/test/queries/clientpositive/input14_limit.q
+++ b/ql/src/test/queries/clientpositive/input14_limit.q
@@ -1,6 +1,6 @@
 set hive.stats.column.autogather=false;
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n13(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -9,7 +9,7 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey LIMIT 20
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
+INSERT OVERWRITE TABLE dest1_n13 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
 
 FROM (
   FROM src
@@ -17,6 +17,6 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey LIMIT 20
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
+INSERT OVERWRITE TABLE dest1_n13 SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n13.* FROM dest1_n13;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input17.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input17.q b/ql/src/test/queries/clientpositive/input17.q
index d348c40..df8c3e0 100644
--- a/ql/src/test/queries/clientpositive/input17.q
+++ b/ql/src/test/queries/clientpositive/input17.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src_thrift
+CREATE TABLE dest1_n81(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,7 +8,7 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey 
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
+INSERT OVERWRITE TABLE dest1_n81 SELECT tmap.tkey, tmap.tvalue;
 
 FROM (
   FROM src_thrift
@@ -15,8 +16,8 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey 
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
+INSERT OVERWRITE TABLE dest1_n81 SELECT tmap.tkey, tmap.tvalue;
 
 -- SORT_QUERY_RESULTS
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n81.* FROM dest1_n81;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input18.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input18.q b/ql/src/test/queries/clientpositive/input18.q
index 280fb7b..4f361d1 100644
--- a/ql/src/test/queries/clientpositive/input18.q
+++ b/ql/src/test/queries/clientpositive/input18.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest1_n124(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,7 +8,7 @@ FROM (
          USING 'cat'
   CLUSTER BY key
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100;
+INSERT OVERWRITE TABLE dest1_n124 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100;
 
 FROM (
   FROM src
@@ -15,8 +16,8 @@ FROM (
          USING 'cat'
   CLUSTER BY key
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100;
+INSERT OVERWRITE TABLE dest1_n124 SELECT tmap.key, regexp_replace(tmap.value,'\t','+') WHERE tmap.key < 100;
 
 -- SORT_QUERY_RESULTS
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n124.* FROM dest1_n124;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input1_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input1_limit.q b/ql/src/test/queries/clientpositive/input1_limit.q
index afea986..60bc76d 100644
--- a/ql/src/test/queries/clientpositive/input1_limit.q
+++ b/ql/src/test/queries/clientpositive/input1_limit.q
@@ -1,19 +1,19 @@
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n12(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest2_n2(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5;
+INSERT OVERWRITE TABLE dest1_n12 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest2_n2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5;
 
 FROM src
-INSERT OVERWRITE TABLE dest1 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
-INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5;
+INSERT OVERWRITE TABLE dest1_n12 SELECT src.key, src.value WHERE src.key < 100 LIMIT 10
+INSERT OVERWRITE TABLE dest2_n2 SELECT src.key, src.value WHERE src.key < 100 LIMIT 5;
 
-SELECT dest1.* FROM dest1;
-SELECT dest2.* FROM dest2;
+SELECT dest1_n12.* FROM dest1_n12;
+SELECT dest2_n2.* FROM dest2_n2;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input20.q b/ql/src/test/queries/clientpositive/input20.q
index 3c2f78f..fb425b7 100644
--- a/ql/src/test/queries/clientpositive/input20.q
+++ b/ql/src/test/queries/clientpositive/input20.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest1_n138(key INT, value STRING) STORED AS TEXTFILE;
 
 ADD FILE ../../data/scripts/input20_script.py;
 
@@ -9,7 +10,7 @@ FROM (
   USING 'cat'
   DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n138
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS key, value;
@@ -20,9 +21,9 @@ FROM (
   USING 'cat' 
   DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n138
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS key, value;
 
-SELECT * FROM dest1 ORDER BY key, value;
+SELECT * FROM dest1_n138 ORDER BY key, value;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input24.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input24.q b/ql/src/test/queries/clientpositive/input24.q
index 95b2377..b4b9bfc 100644
--- a/ql/src/test/queries/clientpositive/input24.q
+++ b/ql/src/test/queries/clientpositive/input24.q
@@ -1,9 +1,9 @@
 
-create table tst(a int, b int) partitioned by (d string);
-alter table tst add partition (d='2009-01-01');
+create table tst_n1(a int, b int) partitioned by (d string);
+alter table tst_n1 add partition (d='2009-01-01');
 explain
-select count(1) from tst x where x.d='2009-01-01';
+select count(1) from tst_n1 x where x.d='2009-01-01';
 
-select count(1) from tst x where x.d='2009-01-01';
+select count(1) from tst_n1 x where x.d='2009-01-01';
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input28.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input28.q b/ql/src/test/queries/clientpositive/input28.q
index 49479ed..388e720 100644
--- a/ql/src/test/queries/clientpositive/input28.q
+++ b/ql/src/test/queries/clientpositive/input28.q
@@ -1,11 +1,11 @@
 set hive.mapred.mode=nonstrict;
 
-create table tst(a string, b string) partitioned by (d string);
-alter table tst add partition (d='2009-01-01');
+create table tst_n0(a string, b string) partitioned by (d string);
+alter table tst_n0 add partition (d='2009-01-01');
 
-insert overwrite table tst partition(d='2009-01-01')
-select tst.a, src.value from tst join src ON (tst.a = src.key);
+insert overwrite table tst_n0 partition(d='2009-01-01')
+select tst_n0.a, src.value from tst_n0 join src ON (tst_n0.a = src.key);
 
-select * from tst where tst.d='2009-01-01';
+select * from tst_n0 where tst_n0.d='2009-01-01';
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input33.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input33.q b/ql/src/test/queries/clientpositive/input33.q
index 3309045..26cb725 100644
--- a/ql/src/test/queries/clientpositive/input33.q
+++ b/ql/src/test/queries/clientpositive/input33.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest1_n135(key INT, value STRING) STORED AS TEXTFILE;
 
 ADD FILE ../../data/scripts/input20_script.py;
 
@@ -9,7 +10,7 @@ FROM (
   USING 'cat'
   DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n135
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS (key STRING, value STRING);
@@ -20,9 +21,9 @@ FROM (
   USING 'cat' 
   DISTRIBUTE BY key, value
 ) tmap
-INSERT OVERWRITE TABLE dest1
+INSERT OVERWRITE TABLE dest1_n135
 REDUCE tmap.key, tmap.value
 USING 'python input20_script.py'
 AS (key STRING, value STRING);
 
-SELECT * FROM dest1 ORDER BY key, value;
+SELECT * FROM dest1_n135 ORDER BY key, value;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input34.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input34.q b/ql/src/test/queries/clientpositive/input34.q
index c600bf5..293a9c4 100644
--- a/ql/src/test/queries/clientpositive/input34.q
+++ b/ql/src/test/queries/clientpositive/input34.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest1_n161(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,7 +8,7 @@ FROM (
   USING 'cat'
   AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
+INSERT OVERWRITE TABLE dest1_n161 SELECT tkey, tvalue;
 
 FROM (
   FROM src
@@ -15,6 +16,6 @@ FROM (
   USING 'cat'
   AS (tkey, tvalue) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
+INSERT OVERWRITE TABLE dest1_n161 SELECT tkey, tvalue;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n161.* FROM dest1_n161;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input35.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input35.q b/ql/src/test/queries/clientpositive/input35.q
index 5e37ea3..0c6fa62 100644
--- a/ql/src/test/queries/clientpositive/input35.q
+++ b/ql/src/test/queries/clientpositive/input35.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest1_n25(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,7 +8,7 @@ FROM (
   USING 'cat'
   AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
+INSERT OVERWRITE TABLE dest1_n25 SELECT tkey, tvalue;
 
 FROM (
   FROM src
@@ -15,6 +16,6 @@ FROM (
   USING 'cat'
   AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
+INSERT OVERWRITE TABLE dest1_n25 SELECT tkey, tvalue;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n25.* FROM dest1_n25;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input36.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input36.q b/ql/src/test/queries/clientpositive/input36.q
index fd1968f..2f8bee5 100644
--- a/ql/src/test/queries/clientpositive/input36.q
+++ b/ql/src/test/queries/clientpositive/input36.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest1_n70(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,7 +8,7 @@ FROM (
   USING 'cat'
   AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
+INSERT OVERWRITE TABLE dest1_n70 SELECT tkey, tvalue;
 
 FROM (
   FROM src
@@ -15,6 +16,6 @@ FROM (
   USING 'cat'
   AS (tkey, tvalue) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\003'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tkey, tvalue;
+INSERT OVERWRITE TABLE dest1_n70 SELECT tkey, tvalue;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n70.* FROM dest1_n70;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input38.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input38.q b/ql/src/test/queries/clientpositive/input38.q
index 1ccc5f2..a04c227 100644
--- a/ql/src/test/queries/clientpositive/input38.q
+++ b/ql/src/test/queries/clientpositive/input38.q
@@ -1,5 +1,5 @@
 
-CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n91(key STRING, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,16 +7,16 @@ FROM (
   SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
          USING 'cat'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value;
+INSERT OVERWRITE TABLE dest1_n91 SELECT tmap.key, tmap.value;
 
 FROM (
   FROM src
   SELECT TRANSFORM(src.key, src.value, 1+2, 3+4)
          USING 'cat'
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.key, tmap.value;
+INSERT OVERWRITE TABLE dest1_n91 SELECT tmap.key, tmap.value;
 
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n91.* FROM dest1_n91;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input39.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input39.q b/ql/src/test/queries/clientpositive/input39.q
index 471736b..764ccf7 100644
--- a/ql/src/test/queries/clientpositive/input39.q
+++ b/ql/src/test/queries/clientpositive/input39.q
@@ -1,16 +1,16 @@
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 
 
-create table t1(key string, value string) partitioned by (ds string);
-create table t2(key string, value string) partitioned by (ds string);
+create table t1_n121(key string, value string) partitioned by (ds string);
+create table t2_n71(key string, value string) partitioned by (ds string);
 
-insert overwrite table t1 partition (ds='1')
+insert overwrite table t1_n121 partition (ds='1')
 select key, value from src;
 
-insert overwrite table t1 partition (ds='2')
+insert overwrite table t1_n121 partition (ds='2')
 select key, value from src;
 
-insert overwrite table t2 partition (ds='1')
+insert overwrite table t2_n71 partition (ds='1')
 select key, value from src;
 
 set hive.test.mode=true;
@@ -20,9 +20,9 @@ set mapreduce.jobtracker.address=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 explain
-select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1';
+select count(1) from t1_n121 join t2_n71 on t1_n121.key=t2_n71.key where t1_n121.ds='1' and t2_n71.ds='1';
 
-select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1';
+select count(1) from t1_n121 join t2_n71 on t1_n121.key=t2_n71.key where t1_n121.ds='1' and t2_n71.ds='1';
 
 set hive.test.mode=false;
 set mapreduce.framework.name;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input39_hadoop20.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input39_hadoop20.q b/ql/src/test/queries/clientpositive/input39_hadoop20.q
index 29e9fae..6166eef 100644
--- a/ql/src/test/queries/clientpositive/input39_hadoop20.q
+++ b/ql/src/test/queries/clientpositive/input39_hadoop20.q
@@ -1,16 +1,16 @@
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
 
 
-create table t1(key string, value string) partitioned by (ds string);
-create table t2(key string, value string) partitioned by (ds string);
+create table t1_n77(key string, value string) partitioned by (ds string);
+create table t2_n46(key string, value string) partitioned by (ds string);
 
-insert overwrite table t1 partition (ds='1')
+insert overwrite table t1_n77 partition (ds='1')
 select key, value from src;
 
-insert overwrite table t1 partition (ds='2')
+insert overwrite table t1_n77 partition (ds='2')
 select key, value from src;
 
-insert overwrite table t2 partition (ds='1')
+insert overwrite table t2_n46 partition (ds='1')
 select key, value from src;
 
 set hive.test.mode=true;
@@ -19,9 +19,9 @@ set mapred.job.tracker=localhost:58;
 set hive.exec.mode.local.auto=true;
 
 explain
-select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1';
+select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1';
 
-select count(1) from t1 join t2 on t1.key=t2.key where t1.ds='1' and t2.ds='1';
+select count(1) from t1_n77 join t2_n46 on t1_n77.key=t2_n46.key where t1_n77.ds='1' and t2_n46.ds='1';
 
 set hive.test.mode=false;
 set mapred.job.tracker;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input3_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input3_limit.q b/ql/src/test/queries/clientpositive/input3_limit.q
index 3e9af60..822f8df 100644
--- a/ql/src/test/queries/clientpositive/input3_limit.q
+++ b/ql/src/test/queries/clientpositive/input3_limit.q
@@ -1,17 +1,17 @@
 
-CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1;
+CREATE TABLE T1_n78(key STRING, value STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n78;
+LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1_n78;
 
 
-CREATE TABLE T2(key STRING, value STRING);
+CREATE TABLE T2_n47(key STRING, value STRING);
 
 EXPLAIN 
-INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20;
+INSERT OVERWRITE TABLE T2_n47 SELECT * FROM (SELECT * FROM T1_n78 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20;
 
-INSERT OVERWRITE TABLE T2 SELECT * FROM (SELECT * FROM T1 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20;
+INSERT OVERWRITE TABLE T2_n47 SELECT * FROM (SELECT * FROM T1_n78 DISTRIBUTE BY key) T ORDER BY key, value LIMIT 20;
 
-SELECT * FROM T2 ORDER BY key, value;
+SELECT * FROM T2_n47 ORDER BY key, value;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input4.q b/ql/src/test/queries/clientpositive/input4.q
index 90fcbdd..adee91a 100644
--- a/ql/src/test/queries/clientpositive/input4.q
+++ b/ql/src/test/queries/clientpositive/input4.q
@@ -1,9 +1,9 @@
 
-CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
+CREATE TABLE INPUT4_n0(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
 EXPLAIN
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4_n0;
 EXPLAIN FORMATTED
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias;
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias;
+SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4_n0 AS Input4Alias
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input44.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input44.q b/ql/src/test/queries/clientpositive/input44.q
index 2e975e5..c4ed032 100644
--- a/ql/src/test/queries/clientpositive/input44.q
+++ b/ql/src/test/queries/clientpositive/input44.q
@@ -1,6 +1,7 @@
-CREATE TABLE dest(key INT, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src
+CREATE TABLE dest_n0(key INT, value STRING) STORED AS TEXTFILE;
 
 SET hive.output.file.extension=.txt;
-INSERT OVERWRITE TABLE dest SELECT src.* FROM src;
+INSERT OVERWRITE TABLE dest_n0 SELECT src.* FROM src;
 
-dfs -cat ${system:test.warehouse.dir}/dest/*.txt
+dfs -cat ${system:test.warehouse.dir}/dest_n0/*.txt

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input5.q b/ql/src/test/queries/clientpositive/input5.q
index 2af9163..bf598de 100644
--- a/ql/src/test/queries/clientpositive/input5.q
+++ b/ql/src/test/queries/clientpositive/input5.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src_thrift
+CREATE TABLE dest1_n94(key STRING, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM (
@@ -7,7 +8,7 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey 
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
+INSERT OVERWRITE TABLE dest1_n94 SELECT tmap.tkey, tmap.tvalue;
 
 FROM (
   FROM src_thrift
@@ -15,6 +16,6 @@ FROM (
          USING 'cat' AS (tkey, tvalue) 
   CLUSTER BY tkey 
 ) tmap
-INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
+INSERT OVERWRITE TABLE dest1_n94 SELECT tmap.tkey, tmap.tvalue;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n94.* FROM dest1_n94;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input6.q b/ql/src/test/queries/clientpositive/input6.q
index 4f2f17e..c243a6b 100644
--- a/ql/src/test/queries/clientpositive/input6.q
+++ b/ql/src/test/queries/clientpositive/input6.q
@@ -1,10 +1,11 @@
-CREATE TABLE dest1(key STRING, value STRING) STORED AS TEXTFILE;
+--! qt:dataset:src1
+CREATE TABLE dest1_n35(key STRING, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null;
+INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src1.value WHERE src1.key is null;
 
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null;
+INSERT OVERWRITE TABLE dest1_n35 SELECT src1.key, src1.value WHERE src1.key is null;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n35.* FROM dest1_n35;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input7.q b/ql/src/test/queries/clientpositive/input7.q
index f2e1ed0..eba4c88 100644
--- a/ql/src/test/queries/clientpositive/input7.q
+++ b/ql/src/test/queries/clientpositive/input7.q
@@ -1,10 +1,11 @@
-CREATE TABLE dest1(c1 DOUBLE, c2 INT) STORED AS TEXTFILE;
+--! qt:dataset:src1
+CREATE TABLE dest1_n167(c1 DOUBLE, c2 INT) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key;
+INSERT OVERWRITE TABLE dest1_n167 SELECT NULL, src1.key;
 
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key;
+INSERT OVERWRITE TABLE dest1_n167 SELECT NULL, src1.key;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n167.* FROM dest1_n167;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input8.q b/ql/src/test/queries/clientpositive/input8.q
index cf24a55..7f81d64 100644
--- a/ql/src/test/queries/clientpositive/input8.q
+++ b/ql/src/test/queries/clientpositive/input8.q
@@ -1,11 +1,11 @@
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE;
+CREATE TABLE dest1_n28(c1 STRING, c2 INT, c3 DOUBLE) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL;
+INSERT OVERWRITE TABLE dest1_n28 SELECT 4 + NULL, src1.key - NULL, NULL + NULL;
 
 FROM src1 
-INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL;
+INSERT OVERWRITE TABLE dest1_n28 SELECT 4 + NULL, src1.key - NULL, NULL + NULL;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n28.* FROM dest1_n28;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input9.q b/ql/src/test/queries/clientpositive/input9.q
index ded20a8..d44114c 100644
--- a/ql/src/test/queries/clientpositive/input9.q
+++ b/ql/src/test/queries/clientpositive/input9.q
@@ -1,11 +1,12 @@
-CREATE TABLE dest1(value STRING, key INT) STORED AS TEXTFILE;
+--! qt:dataset:src1
+CREATE TABLE dest1_n159(value STRING, key INT) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL;
+INSERT OVERWRITE TABLE dest1_n159 SELECT NULL, src1.key where NULL = NULL;
 
 FROM src1
-INSERT OVERWRITE TABLE dest1 SELECT NULL, src1.key where NULL = NULL;
+INSERT OVERWRITE TABLE dest1_n159 SELECT NULL, src1.key where NULL = NULL;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n159.* FROM dest1_n159;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_dynamicserde.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_dynamicserde.q b/ql/src/test/queries/clientpositive/input_dynamicserde.q
index 1437742..544977a 100644
--- a/ql/src/test/queries/clientpositive/input_dynamicserde.q
+++ b/ql/src/test/queries/clientpositive/input_dynamicserde.q
@@ -1,4 +1,5 @@
-CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string)
+--! qt:dataset:src_thrift
+CREATE TABLE dest1_n114(a array<int>, b array<string>, c map<string,string>, d int, e string)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '1'
 COLLECTION ITEMS TERMINATED BY '2'
@@ -8,11 +9,11 @@ STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring;
+INSERT OVERWRITE TABLE dest1_n114 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring;
 
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring;
+INSERT OVERWRITE TABLE dest1_n114 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring;
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n114.* FROM dest1_n114;
 
-SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1;
+SELECT dest1_n114.a[0], dest1_n114.b[0], dest1_n114.c['key2'], dest1_n114.d, dest1_n114.e FROM dest1_n114;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_lazyserde.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_lazyserde.q b/ql/src/test/queries/clientpositive/input_lazyserde.q
index c37e48b..3264411 100644
--- a/ql/src/test/queries/clientpositive/input_lazyserde.q
+++ b/ql/src/test/queries/clientpositive/input_lazyserde.q
@@ -4,8 +4,8 @@ set hive.test.vectorized.execution.enabled.override=none;
 
 -- SORT_QUERY_RESULTS
 
-DROP TABLE dest1;
-CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string)
+DROP TABLE dest1_n43;
+CREATE TABLE dest1_n43(a array<int>, b array<string>, c map<string,string>, d int, e string)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '1'
 COLLECTION ITEMS TERMINATED BY '2'
@@ -15,38 +15,38 @@ STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
+INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
 
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
+INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
 
-SELECT dest1.* FROM dest1 CLUSTER BY 1;
+SELECT dest1_n43.* FROM dest1_n43 CLUSTER BY 1;
 
-SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1;
+SELECT dest1_n43.a[0], dest1_n43.b[0], dest1_n43.c['key2'], dest1_n43.d, dest1_n43.e FROM dest1_n43 CLUSTER BY 1;
 
-DROP TABLE dest1;
+DROP TABLE dest1_n43;
 
-CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
-DROP TABLE dest1;
+CREATE TABLE dest1_n43(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n43;
+DROP TABLE dest1_n43;
 
-CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
+CREATE TABLE dest1_n43(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n43 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n43;
 
 CREATE TABLE destBin(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE;
 INSERT OVERWRITE TABLE destBin SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2;
 SELECT * from destBin;
 DROP TABLE destBin;
 
-DROP TABLE dest2;
+DROP TABLE dest2_n7;
 DROP TABLE dest3;
 
-CREATE TABLE dest2 (a map<string,map<string,map<string,uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>>>>)
+CREATE TABLE dest2_n7 (a map<string,map<string,map<string,uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>>>>)
   ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift;
-SELECT a from dest2 limit 10;
+INSERT OVERWRITE TABLE dest2_n7 SELECT src_thrift.attributes FROM src_thrift;
+SELECT a from dest2_n7 limit 10;
 
 CREATE TABLE dest3 (
 unionfield1 uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>,

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_lazyserde2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_lazyserde2.q b/ql/src/test/queries/clientpositive/input_lazyserde2.q
index 2a2cf06..e6eefd5 100644
--- a/ql/src/test/queries/clientpositive/input_lazyserde2.q
+++ b/ql/src/test/queries/clientpositive/input_lazyserde2.q
@@ -4,8 +4,8 @@ set hive.test.vectorized.execution.enabled.override=none;
 
 -- SORT_QUERY_RESULTS
 
-DROP TABLE dest1;
-CREATE TABLE dest1(a array<int>, b array<string>, c map<string,string>, d int, e string)
+DROP TABLE dest1_n148;
+CREATE TABLE dest1_n148(a array<int>, b array<string>, c map<string,string>, d int, e string)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '1'
 COLLECTION ITEMS TERMINATED BY '2'
@@ -15,43 +15,43 @@ STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
 
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.lint, src_thrift.lstring, src_thrift.mstringstring, src_thrift.aint, src_thrift.astring DISTRIBUTE BY 1;
 
-SELECT dest1.* FROM dest1 CLUSTER BY 1;
+SELECT dest1_n148.* FROM dest1_n148 CLUSTER BY 1;
 
-SELECT dest1.a[0], dest1.b[0], dest1.c['key2'], dest1.d, dest1.e FROM dest1 CLUSTER BY 1;
+SELECT dest1_n148.a[0], dest1_n148.b[0], dest1_n148.c['key2'], dest1_n148.d, dest1_n148.e FROM dest1_n148 CLUSTER BY 1;
 
-DROP TABLE dest1;
+DROP TABLE dest1_n148;
 
-CREATE TABLE dest1(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
-DROP TABLE dest1;
+CREATE TABLE dest1_n148(a array<int>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.lint FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n148;
+DROP TABLE dest1_n148;
 
-CREATE TABLE dest1(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
-SELECT * from dest1;
+CREATE TABLE dest1_n148(a map<string,string>) ROW FORMAT DELIMITED FIELDS TERMINATED BY '1' ESCAPED BY '\\';
+INSERT OVERWRITE TABLE dest1_n148 SELECT src_thrift.mstringstring FROM src_thrift DISTRIBUTE BY 1;
+SELECT * from dest1_n148;
 
-CREATE TABLE destBin(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE destBin SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2;
-SELECT * from destBin;
-DROP TABLE destBin;
+CREATE TABLE destBin_n0(a UNIONTYPE<int, double, array<string>, struct<col1:int,col2:string>>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
+INSERT OVERWRITE TABLE destBin_n0 SELECT create_union( CASE WHEN key < 100 THEN 0 WHEN key < 200 THEN 1 WHEN key < 300 THEN 2 WHEN key < 400 THEN 3 ELSE 0 END, key, 2.0D, array("one","two"), struct(5,"five")) FROM srcbucket2;
+SELECT * from destBin_n0;
+DROP TABLE destBin_n0;
 
-DROP TABLE dest2;
-DROP TABLE dest3;
+DROP TABLE dest2_n38;
+DROP TABLE dest3_n6;
 
-CREATE TABLE dest2 (a map<string,map<string,map<string,uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>>>>)
+CREATE TABLE dest2_n38 (a map<string,map<string,map<string,uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>>>>)
   ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE dest2 SELECT src_thrift.attributes FROM src_thrift;
-SELECT a from dest2 limit 10;
+INSERT OVERWRITE TABLE dest2_n38 SELECT src_thrift.attributes FROM src_thrift;
+SELECT a from dest2_n38 limit 10;
 
-CREATE TABLE dest3 (
+CREATE TABLE dest3_n6 (
 unionfield1 uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>,
 unionfield2 uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>,
 unionfield3 uniontype<int, bigint, string, double, boolean, array<string>, map<string,string>>
 ) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe2' STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE dest3 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift;
-SELECT unionfield1, unionField2, unionfield3 from dest3 limit 10;
+INSERT OVERWRITE TABLE dest3_n6 SELECT src_thrift.unionField1,src_thrift.unionField2,src_thrift.unionField3 from src_thrift;
+SELECT unionfield1, unionField2, unionfield3 from dest3_n6 limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_part1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_part1.q b/ql/src/test/queries/clientpositive/input_part1.q
index 0a3a3b4..3857b59 100644
--- a/ql/src/test/queries/clientpositive/input_part1.q
+++ b/ql/src/test/queries/clientpositive/input_part1.q
@@ -1,12 +1,12 @@
 set hive.mapred.mode=nonstrict;
-CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
+CREATE TABLE dest1_n45(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
 
 EXPLAIN EXTENDED
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n45 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
 
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n45 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12';
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n45.* FROM dest1_n45;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_part2.q b/ql/src/test/queries/clientpositive/input_part2.q
index aff02e3..67f064f 100644
--- a/ql/src/test/queries/clientpositive/input_part2.q
+++ b/ql/src/test/queries/clientpositive/input_part2.q
@@ -1,18 +1,19 @@
-CREATE TABLE dest1(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
-CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
+--! qt:dataset:srcpart
+CREATE TABLE dest1_n84(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
+CREATE TABLE dest2_n20(key INT, value STRING, hr STRING, ds STRING) STORED AS TEXTFILE;
 
 -- SORT_QUERY_RESULTS
 
 EXPLAIN EXTENDED
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n84 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n20 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
 
 FROM srcpart
-INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
-INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
+INSERT OVERWRITE TABLE dest1_n84 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12'
+INSERT OVERWRITE TABLE dest2_n20 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12';
 
-SELECT dest1.* FROM dest1 sort by key,value,ds,hr;
-SELECT dest2.* FROM dest2 sort by key,value,ds,hr;
+SELECT dest1_n84.* FROM dest1_n84 sort by key,value,ds,hr;
+SELECT dest2_n20.* FROM dest2_n20 sort by key,value,ds,hr;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_part5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_part5.q b/ql/src/test/queries/clientpositive/input_part5.q
index 029aa05..9551210 100644
--- a/ql/src/test/queries/clientpositive/input_part5.q
+++ b/ql/src/test/queries/clientpositive/input_part5.q
@@ -1,13 +1,13 @@
 set hive.mapred.mode=nonstrict;
 
-create table tmptable(key string, value string, hr string, ds string);
+create table tmptable_n2(key string, value string, hr string, ds string);
 
 EXPLAIN
-insert overwrite table tmptable
+insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100;
 
-insert overwrite table tmptable
+insert overwrite table tmptable_n2
 SELECT x.* FROM SRCPART x WHERE x.ds = '2008-04-08' and x.key < 100;
 
-select * from tmptable x sort by x.key,x.value,x.ds,x.hr;
+select * from tmptable_n2 x sort by x.key,x.value,x.ds,x.hr;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_testxpath.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input_testxpath.q b/ql/src/test/queries/clientpositive/input_testxpath.q
old mode 100755
new mode 100644
index ed22404..ddbbed4
--- a/ql/src/test/queries/clientpositive/input_testxpath.q
+++ b/ql/src/test/queries/clientpositive/input_testxpath.q
@@ -1,10 +1,11 @@
-CREATE TABLE dest1(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE;
+--! qt:dataset:src_thrift
+CREATE TABLE dest1_n6(key INT, value STRING, mapvalue STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
 
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
+INSERT OVERWRITE TABLE dest1_n6 SELECT src_thrift.lint[1], src_thrift.lintstring[0].mystring, src_thrift.mstringstring['key_2'];
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n6.* FROM dest1_n6;