You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by sp...@apache.org on 2017/04/18 15:10:02 UTC
[1/2] hive git commit: HIVE-16287: Alter table partition rename with
location - moves partition back to hive warehouse (Vihang Karajgaonkar,
reviewed by Sergio Pena, Ying Chen)
Repository: hive
Updated Branches:
refs/heads/branch-2.3 3b19bd9b6 -> d9927aa02
HIVE-16287: Alter table partition rename with location - moves partition back to hive warehouse (Vihang Karajgaonkar, reviewed by Sergio Pena, Ying Chen)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/49f64997
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/49f64997
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/49f64997
Branch: refs/heads/branch-2.3
Commit: 49f64997cf9b5ec7dfceb6963310883ffc803649
Parents: 3b19bd9
Author: Vihang Karajgaonkar <vi...@cloudera.com>
Authored: Mon Apr 17 09:52:40 2017 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Tue Apr 18 10:09:41 2017 -0500
----------------------------------------------------------------------
.../apache/hive/hcatalog/cli/TestPermsGrp.java | 6 +-
.../hive/metastore/TestReplChangeManager.java | 12 ++--
.../hadoop/hive/metastore/HiveAlterHandler.java | 5 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 2 +-
.../apache/hadoop/hive/metastore/Warehouse.java | 69 +++++++++++++++++---
.../hive/ql/parse/ImportSemanticAnalyzer.java | 6 +-
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 2 +-
.../hadoop/hive/ql/parse/TaskCompiler.java | 2 +-
.../hadoop/hive/ql/metadata/TestHive.java | 2 +-
.../clientpositive/rename_partition_location.q | 14 ++++
.../rename_partition_location.q.out | 23 +++++++
11 files changed, 117 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
index 8aa510f..66a5dd4 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
@@ -116,7 +116,7 @@ public class TestPermsGrp extends TestCase {
Table tbl = getTable(dbName, tblName, typeName);
msc.createTable(tbl);
Database db = Hive.get(hcatConf).getDatabase(dbName);
- Path dfsPath = clientWH.getTablePath(db, tblName);
+ Path dfsPath = clientWH.getDefaultTablePath(db, tblName);
cleanupTbl(dbName, tblName, typeName);
// Next user did specify perms.
@@ -126,7 +126,7 @@ public class TestPermsGrp extends TestCase {
assertTrue(e instanceof ExitException);
assertEquals(((ExitException) e).getStatus(), 0);
}
- dfsPath = clientWH.getTablePath(db, tblName);
+ dfsPath = clientWH.getDefaultTablePath(db, tblName);
assertTrue(dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath).getPermission().equals(FsPermission.valueOf("drwx-wx---")));
cleanupTbl(dbName, tblName, typeName);
@@ -141,7 +141,7 @@ public class TestPermsGrp extends TestCase {
assertTrue(me instanceof ExitException);
}
// No physical dir gets created.
- dfsPath = clientWH.getTablePath(db, tblName);
+ dfsPath = clientWH.getDefaultTablePath(db, tblName);
try {
dfsPath.getFileSystem(hcatConf).getFileStatus(dfsPath);
assert false;
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
index 1ac4d01..3f9eec3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
@@ -151,15 +151,15 @@ public class TestReplChangeManager {
Partition part3 = createPartition(dbName, tblName, columns, values, serdeInfo);
client.add_partition(part3);
- Path part1Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
+ Path part1Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
createFile(part1Path, "p1");
String path1Chksum = ReplChangeManager.getChksumString(part1Path, fs);
- Path part2Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
+ Path part2Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
createFile(part2Path, "p2");
String path2Chksum = ReplChangeManager.getChksumString(part2Path, fs);
- Path part3Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
+ Path part3Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
createFile(part3Path, "p3");
String path3Chksum = ReplChangeManager.getChksumString(part3Path, fs);
@@ -221,15 +221,15 @@ public class TestReplChangeManager {
client.createTable(tbl);
- Path filePath1 = new Path(warehouse.getTablePath(db, tblName), "part1");
+ Path filePath1 = new Path(warehouse.getDefaultTablePath(db, tblName), "part1");
createFile(filePath1, "f1");
String fileChksum1 = ReplChangeManager.getChksumString(filePath1, fs);
- Path filePath2 = new Path(warehouse.getTablePath(db, tblName), "part2");
+ Path filePath2 = new Path(warehouse.getDefaultTablePath(db, tblName), "part2");
createFile(filePath2, "f2");
String fileChksum2 = ReplChangeManager.getChksumString(filePath2, fs);
- Path filePath3 = new Path(warehouse.getTablePath(db, tblName), "part3");
+ Path filePath3 = new Path(warehouse.getDefaultTablePath(db, tblName), "part3");
createFile(filePath3, "f3");
String fileChksum3 = ReplChangeManager.getChksumString(filePath3, fs);
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 4ce6a65..f582a0e 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -442,8 +442,9 @@ public class HiveAlterHandler implements AlterHandler {
msdb.alterPartition(dbname, name, part_vals, new_part);
} else {
try {
- destPath = new Path(wh.getTablePath(msdb.getDatabase(dbname), name),
- Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()));
+ // if tbl location is available use it
+ // else derive the tbl location from database location
+ destPath = wh.getPartitionPath(msdb.getDatabase(dbname), tbl, new_part.getValues());
destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
} catch (NoSuchObjectException e) {
LOG.debug("Didn't find object in metastore ", e);
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 80b1e98..4893aee 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1401,7 +1401,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
if (tbl.getSd().getLocation() == null
|| tbl.getSd().getLocation().isEmpty()) {
- tblPath = wh.getTablePath(
+ tblPath = wh.getDefaultTablePath(
ms.getDatabase(tbl.getDbName()), tbl.getTableName());
} else {
if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index a65a2e7..27283d8 100755
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -151,11 +151,6 @@ public class Warehouse {
return whRoot;
}
- public Path getTablePath(String whRootString, String tableName) throws MetaException {
- Path whRoot = getDnsPath(new Path(whRootString));
- return new Path(whRoot, MetaStoreUtils.encodeTableName(tableName.toLowerCase()));
- }
-
public Path getDatabasePath(Database db) throws MetaException {
if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
return getWhRoot();
@@ -170,7 +165,14 @@ public class Warehouse {
return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
}
- public Path getTablePath(Database db, String tableName)
+ /**
+ * Returns the default location of the table path using the parent database's location
+ * @param db Database where the table is created
+ * @param tableName table name
+ * @return the default table path, located directly under the parent database's path
+ * @throws MetaException
+ */
+ public Path getDefaultTablePath(Database db, String tableName)
throws MetaException {
return getDnsPath(new Path(getDatabasePath(db), MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
}
@@ -450,16 +452,67 @@ public class Warehouse {
return partSpec;
}
- public Path getPartitionPath(Database db, String tableName,
+ /**
+ * Returns the default partition path of a table within a given database and partition key value
+ * pairs. It uses the database location and appends the table name and the partition key-value
+ * pairs to create the Path for the partition directory
+ *
+ * @param db - parent database which is used to get the base location of the partition directory
+ * @param tableName - table name for the partitions
+ * @param pm - Partition key value pairs
+ * @return the default partition path derived from the database location and table name
+ * @throws MetaException
+ */
+ public Path getDefaultPartitionPath(Database db, String tableName,
Map<String, String> pm) throws MetaException {
- return new Path(getTablePath(db, tableName), makePartPath(pm));
+ return getPartitionPath(getDefaultTablePath(db, tableName), pm);
}
+ /**
+ * Returns the path object for the given partition key-value pairs and the base location
+ *
+ * @param tblPath - the base location for the partitions. Typically the table location
+ * @param pm - Partition key value pairs
+ * @return the partition path built by appending the partition spec to the given base location
+ * @throws MetaException
+ */
public Path getPartitionPath(Path tblPath, Map<String, String> pm)
throws MetaException {
return new Path(tblPath, makePartPath(pm));
}
+ /**
+ * Given a database, a table and the partition key value pairs this method returns the Path object
+ * corresponding to the partition key value pairs. It uses the table location if available else
+ * uses the database location for constructing the path corresponding to the partition key-value
+ * pairs
+ *
+ * @param db - Parent database of the given table
+ * @param table - Table for which the partition key-values are given
+ * @param vals - List of values for the partition keys
+ * @return Path corresponding to the partition key-value pairs
+ * @throws MetaException
+ */
+ public Path getPartitionPath(Database db, Table table, List<String> vals)
+ throws MetaException {
+ List<FieldSchema> partKeys = table.getPartitionKeys();
+ if (partKeys == null || (partKeys.size() != vals.size())) {
+ throw new MetaException("Invalid number of partition keys found for " + table.getTableName());
+ }
+ Map<String, String> pm = new LinkedHashMap<>(vals.size());
+ int i = 0;
+ for (FieldSchema key : partKeys) {
+ pm.put(key.getName(), vals.get(i));
+ i++;
+ }
+
+ if (table.getSd().getLocation() != null) {
+ return getPartitionPath(getDnsPath(new Path(table.getSd().getLocation())), pm);
+ } else {
+ return getDefaultPartitionPath(db, table.getTableName(), pm);
+ }
+ }
+
public boolean isDir(Path f) throws MetaException {
FileSystem fs = null;
try {
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 245c483..ec602ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -441,7 +441,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
} else {
Database parentDb = x.getHive().getDatabase(tblDesc.getDatabaseName());
tgtPath = new Path(
- wh.getTablePath( parentDb, tblDesc.getTableName()),
+ wh.getDefaultTablePath( parentDb, tblDesc.getTableName()),
Warehouse.makePartPath(partSpec.getPartSpec()));
}
} else {
@@ -768,7 +768,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
if (tblDesc.getLocation() != null) {
tablePath = new Path(tblDesc.getLocation());
} else {
- tablePath = wh.getTablePath(parentDb, tblDesc.getTableName());
+ tablePath = wh.getDefaultTablePath(parentDb, tblDesc.getTableName());
}
FileSystem tgtFs = FileSystem.get(tablePath.toUri(), x.getConf());
checkTargetLocationEmpty(tgtFs, tablePath, replicationSpec,x);
@@ -821,7 +821,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
}
if (tblDesc.getLocation() == null) {
if (!waitOnPrecursor){
- tblDesc.setLocation(wh.getTablePath(parentDb, tblDesc.getTableName()).toString());
+ tblDesc.setLocation(wh.getDefaultTablePath(parentDb, tblDesc.getTableName()).toString());
} else {
tblDesc.setLocation(
wh.getDnsPath(new Path(
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index b80d9fc..d2aa47b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -7262,7 +7262,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
String tName = Utilities.getDbTableName(tableDesc.getTableName())[1];
try {
Warehouse wh = new Warehouse(conf);
- tlocation = wh.getTablePath(db.getDatabase(tableDesc.getDatabaseName()), tName);
+ tlocation = wh.getDefaultTablePath(db.getDatabase(tableDesc.getDatabaseName()), tName);
} catch (MetaException|HiveException e) {
throw new SemanticException(e);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 5f9ccc8..7caeb78 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -257,7 +257,7 @@ public abstract class TaskCompiler {
+ " does not exist.");
}
Warehouse wh = new Warehouse(conf);
- targetPath = wh.getTablePath(db.getDatabase(names[0]), names[1]);
+ targetPath = wh.getDefaultTablePath(db.getDatabase(names[0]), names[1]);
} catch (HiveException e) {
throw new SemanticException(e);
} catch (MetaException e) {
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index ccc0f9c..91eb033 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -318,7 +318,7 @@ public class TestHive extends TestCase {
assertEquals("Table retention didn't match for table: " + tableName,
tbl.getRetention(), ft.getRetention());
assertEquals("Data location is not set correctly",
- wh.getTablePath(hm.getDatabase(DEFAULT_DATABASE_NAME), tableName).toString(),
+ wh.getDefaultTablePath(hm.getDatabase(DEFAULT_DATABASE_NAME), tableName).toString(),
ft.getDataLocation().toString());
// now that URI and times are set correctly, set the original table's uri and times
// and then compare the two tables
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/ql/src/test/queries/clientpositive/rename_partition_location.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/rename_partition_location.q b/ql/src/test/queries/clientpositive/rename_partition_location.q
index ee4ff81..bf8baca 100644
--- a/ql/src/test/queries/clientpositive/rename_partition_location.q
+++ b/ql/src/test/queries/clientpositive/rename_partition_location.q
@@ -17,4 +17,18 @@ SELECT count(*) FROM rename_partition_table where part = '2';
SET hive.exec.post.hooks=;
+CREATE TABLE rename_partition_table_2 (key STRING, value STRING) PARTITIONED BY (part STRING)
+LOCATION '${system:test.tmp.dir}/rename_partition_table_2';
+
+INSERT OVERWRITE TABLE rename_partition_table_2 PARTITION (part = '1') SELECT * FROM src;
+
+ALTER TABLE rename_partition_table_2 PARTITION (part = '1') RENAME TO PARTITION (part = '2');
+
+SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyPartitionIsSubdirectoryOfTableHook;
+
+SELECT count(*) FROM rename_partition_table where part = '2';
+
+SET hive.exec.post.hooks=;
+
DROP TABLE rename_partition_table;
+DROP TABLE rename_partition_table_2;
http://git-wip-us.apache.org/repos/asf/hive/blob/49f64997/ql/src/test/results/clientpositive/rename_partition_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rename_partition_location.q.out b/ql/src/test/results/clientpositive/rename_partition_location.q.out
index bfdd580..55c31bc 100644
--- a/ql/src/test/results/clientpositive/rename_partition_location.q.out
+++ b/ql/src/test/results/clientpositive/rename_partition_location.q.out
@@ -46,7 +46,30 @@ PREHOOK: type: QUERY
PREHOOK: Input: default@rename_partition_table
#### A masked pattern was here ####
500
+PREHOOK: query: CREATE TABLE rename_partition_table_2 (key STRING, value STRING) PARTITIONED BY (part STRING)
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@rename_partition_table_2
+PREHOOK: query: INSERT OVERWRITE TABLE rename_partition_table_2 PARTITION (part = '1') SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@rename_partition_table_2@part=1
+PREHOOK: query: ALTER TABLE rename_partition_table_2 PARTITION (part = '1') RENAME TO PARTITION (part = '2')
+PREHOOK: type: ALTERTABLE_RENAMEPART
+PREHOOK: Input: default@rename_partition_table_2
+PREHOOK: Output: default@rename_partition_table_2@part=1
+PREHOOK: query: SELECT count(*) FROM rename_partition_table where part = '2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@rename_partition_table
+#### A masked pattern was here ####
+500
PREHOOK: query: DROP TABLE rename_partition_table
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@rename_partition_table
PREHOOK: Output: default@rename_partition_table
+PREHOOK: query: DROP TABLE rename_partition_table_2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@rename_partition_table_2
+PREHOOK: Output: default@rename_partition_table_2
[2/2] hive git commit: HIVE-16287: Alter table partition rename with
location - moves partition back to hive warehouse (Vihang Karajgaonkar,
reviewed by Sergio Pena, Ying Chen) (addendum I)
Posted by sp...@apache.org.
HIVE-16287: Alter table partition rename with location - moves partition back to hive warehouse (Vihang Karajgaonkar, reviewed by Sergio Pena, Ying Chen) (addendum I)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d9927aa0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d9927aa0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d9927aa0
Branch: refs/heads/branch-2.3
Commit: d9927aa02546d3b41987c22856c5644c56df8f4d
Parents: 49f6499
Author: Vihang Karajgaonkar <vi...@cloudera.com>
Authored: Mon Apr 17 13:43:58 2017 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Tue Apr 18 10:09:48 2017 -0500
----------------------------------------------------------------------
ql/src/test/queries/clientpositive/rename_partition_location.q | 2 +-
.../test/results/clientpositive/rename_partition_location.q.out | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/d9927aa0/ql/src/test/queries/clientpositive/rename_partition_location.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/rename_partition_location.q b/ql/src/test/queries/clientpositive/rename_partition_location.q
index bf8baca..583c10a 100644
--- a/ql/src/test/queries/clientpositive/rename_partition_location.q
+++ b/ql/src/test/queries/clientpositive/rename_partition_location.q
@@ -26,7 +26,7 @@ ALTER TABLE rename_partition_table_2 PARTITION (part = '1') RENAME TO PARTITION
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyPartitionIsSubdirectoryOfTableHook;
-SELECT count(*) FROM rename_partition_table where part = '2';
+SELECT count(*) FROM rename_partition_table_2 where part = '2';
SET hive.exec.post.hooks=;
http://git-wip-us.apache.org/repos/asf/hive/blob/d9927aa0/ql/src/test/results/clientpositive/rename_partition_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rename_partition_location.q.out b/ql/src/test/results/clientpositive/rename_partition_location.q.out
index 55c31bc..4fbc5de 100644
--- a/ql/src/test/results/clientpositive/rename_partition_location.q.out
+++ b/ql/src/test/results/clientpositive/rename_partition_location.q.out
@@ -60,9 +60,9 @@ PREHOOK: query: ALTER TABLE rename_partition_table_2 PARTITION (part = '1') RENA
PREHOOK: type: ALTERTABLE_RENAMEPART
PREHOOK: Input: default@rename_partition_table_2
PREHOOK: Output: default@rename_partition_table_2@part=1
-PREHOOK: query: SELECT count(*) FROM rename_partition_table where part = '2'
+PREHOOK: query: SELECT count(*) FROM rename_partition_table_2 where part = '2'
PREHOOK: type: QUERY
-PREHOOK: Input: default@rename_partition_table
+PREHOOK: Input: default@rename_partition_table_2
#### A masked pattern was here ####
500
PREHOOK: query: DROP TABLE rename_partition_table