You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ga...@apache.org on 2017/12/06 21:20:17 UTC
[02/12] hive git commit: HIVE-17980 Move HiveMetaStoreClient plus a
few remaining classes. This closes #272 (Alan Gates, reviewed by Daniel Dai)
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
new file mode 100644
index 0000000..24c59f2
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -0,0 +1,1053 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.thrift.TException;
+
+/**
+ * A wrapper around {@link org.apache.hadoop.hive.metastore.ObjectStore}
+ * with the ability to control the result of commitTransaction().
+ * All other functions simply delegate to an embedded ObjectStore object.
+ * Ideally, we should have just extended ObjectStore instead of using
+ * delegation. However, since HiveMetaStore uses a Proxy, this class must
+ * not inherit from any other class.
+ */
+public class DummyRawStoreControlledCommit implements RawStore, Configurable {
+
+ // The wrapped ObjectStore instance that receives every delegated call.
+ private final ObjectStore objectStore;
+ public DummyRawStoreControlledCommit() {
+ objectStore = new ObjectStore();
+ }
+
+ /**
+ * If true, shouldCommit() will simply call delegate commitTransaction() to the
+ * underlying ObjectStore.
+ * If false, shouldCommit() immediately returns false.
+ */
+ // NOTE: the flag is static, so toggling it affects every instance in the JVM,
+ // not just one store. Tests must reset it to true when done.
+ private static boolean shouldCommitSucceed = true;
+ public static void setCommitSucceed(boolean flag) {
+ shouldCommitSucceed = flag;
+ }
+
+ // Commit only when the test harness allows it. When the static flag is
+ // false we report failure without touching the underlying ObjectStore
+ // transaction (short-circuit keeps the delegate call from happening).
+ @Override
+ public boolean commitTransaction() {
+ return shouldCommitSucceed && objectStore.commitTransaction();
+ }
+
+ // Deliberately NOT delegated: always reports that no transaction is active,
+ // regardless of the state of the wrapped ObjectStore.
+ @Override
+ public boolean isActiveTransaction() {
+ return false;
+ }
+
+ // All remaining functions simply delegate to objectStore
+ // (except for a handful of stubs, each flagged with a comment below).
+
+ @Override
+ public Configuration getConf() {
+ return objectStore.getConf();
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ objectStore.setConf(conf);
+ }
+
+ @Override
+ public void shutdown() {
+ objectStore.shutdown();
+ }
+
+ @Override
+ public boolean openTransaction() {
+ return objectStore.openTransaction();
+ }
+
+ @Override
+ public void rollbackTransaction() {
+ objectStore.rollbackTransaction();
+ }
+
+ // Database and type operations: straight delegation.
+ @Override
+ public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+ objectStore.createDatabase(db);
+ }
+
+ @Override
+ public Database getDatabase(String dbName) throws NoSuchObjectException {
+ return objectStore.getDatabase(dbName);
+ }
+
+ @Override
+ public boolean dropDatabase(String dbName)
+ throws NoSuchObjectException, MetaException {
+ return objectStore.dropDatabase(dbName);
+ }
+
+ @Override
+ public boolean alterDatabase(String dbName, Database db)
+ throws NoSuchObjectException, MetaException {
+
+ return objectStore.alterDatabase(dbName, db);
+ }
+
+ @Override
+ public List<String> getDatabases(String pattern) throws MetaException {
+ return objectStore.getDatabases(pattern);
+ }
+
+ @Override
+ public List<String> getAllDatabases() throws MetaException {
+ return objectStore.getAllDatabases();
+ }
+
+ @Override
+ public boolean createType(Type type) {
+ return objectStore.createType(type);
+ }
+
+ @Override
+ public Type getType(String typeName) {
+ return objectStore.getType(typeName);
+ }
+
+ @Override
+ public boolean dropType(String typeName) {
+ return objectStore.dropType(typeName);
+ }
+
+ // Table and partition CRUD: straight delegation.
+ @Override
+ public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+ objectStore.createTable(tbl);
+ }
+
+ @Override
+ public boolean dropTable(String dbName, String tableName)
+ throws MetaException, NoSuchObjectException,
+ InvalidObjectException, InvalidInputException {
+ return objectStore.dropTable(dbName, tableName);
+ }
+
+ @Override
+ public Table getTable(String dbName, String tableName) throws MetaException {
+ return objectStore.getTable(dbName, tableName);
+ }
+
+ @Override
+ public boolean addPartition(Partition part)
+ throws InvalidObjectException, MetaException {
+ return objectStore.addPartition(part);
+ }
+
+ @Override
+ public Partition getPartition(String dbName, String tableName, List<String> partVals)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartition(dbName, tableName, partVals);
+ }
+
+ @Override
+ public boolean dropPartition(String dbName, String tableName, List<String> partVals)
+ throws MetaException, NoSuchObjectException,
+ InvalidObjectException, InvalidInputException {
+ return objectStore.dropPartition(dbName, tableName, partVals);
+ }
+
+ @Override
+ public List<Partition> getPartitions(String dbName, String tableName, int max)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitions(dbName, tableName, max);
+ }
+
+ // Table listing and partition-name queries: delegation, except where noted.
+ @Override
+ public void alterTable(String dbName, String name, Table newTable)
+ throws InvalidObjectException, MetaException {
+ objectStore.alterTable(dbName, name, newTable);
+ }
+
+ @Override
+ public List<String> getTables(String dbName, String pattern) throws MetaException {
+ return objectStore.getTables(dbName, pattern);
+ }
+
+ @Override
+ public List<String> getTables(String dbName, String pattern, TableType tableType) throws MetaException {
+ return objectStore.getTables(dbName, pattern, tableType);
+ }
+
+ @Override
+ public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+ throws MetaException {
+ return objectStore.getTableMeta(dbNames, tableNames, tableTypes);
+ }
+
+ @Override
+ public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+ throws MetaException, UnknownDBException {
+ return objectStore.getTableObjectsByName(dbName, tableNames);
+ }
+
+ @Override
+ public List<String> getAllTables(String dbName) throws MetaException {
+ return objectStore.getAllTables(dbName);
+ }
+
+ @Override
+ public List<String> listTableNamesByFilter(String dbName, String filter,
+ short maxTables) throws MetaException, UnknownDBException {
+ return objectStore.listTableNamesByFilter(dbName, filter, maxTables);
+ }
+
+ @Override
+ public List<String> listPartitionNames(String dbName, String tblName, short maxParts)
+ throws MetaException {
+ return objectStore.listPartitionNames(dbName, tblName, maxParts);
+ }
+
+ // NOT delegated: unimplemented stub that always returns null.
+ // NOTE(review): callers of this dummy must not rely on this method.
+ @Override
+ public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+ return null;
+ }
+
+ @Override
+ public List<String> listPartitionNamesByFilter(String dbName, String tblName,
+ String filter, short maxParts) throws MetaException {
+ return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, maxParts);
+ }
+
+ // Partition alteration and index operations: straight delegation.
+ @Override
+ public void alterPartition(String dbName, String tblName, List<String> partVals,
+ Partition newPart) throws InvalidObjectException, MetaException {
+ objectStore.alterPartition(dbName, tblName, partVals, newPart);
+ }
+
+ @Override
+ public void alterPartitions(String dbName, String tblName,
+ List<List<String>> partValsList, List<Partition> newParts)
+ throws InvalidObjectException, MetaException {
+ objectStore.alterPartitions(dbName, tblName, partValsList, newParts);
+ }
+
+ @Override
+ public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
+ return objectStore.addIndex(index);
+ }
+
+ @Override
+ public Index getIndex(String dbName, String origTableName, String indexName)
+ throws MetaException {
+ return objectStore.getIndex(dbName, origTableName, indexName);
+ }
+
+ @Override
+ public boolean dropIndex(String dbName, String origTableName, String indexName)
+ throws MetaException {
+ return objectStore.dropIndex(dbName, origTableName, indexName);
+ }
+
+ @Override
+ public List<Index> getIndexes(String dbName, String origTableName, int max)
+ throws MetaException {
+ return objectStore.getIndexes(dbName, origTableName, max);
+ }
+
+ @Override
+ public List<String> listIndexNames(String dbName, String origTableName, short max)
+ throws MetaException {
+ return objectStore.listIndexNames(dbName, origTableName, max);
+ }
+
+ @Override
+ public void alterIndex(String dbName, String baseTblName, String name, Index newIndex)
+ throws InvalidObjectException, MetaException {
+ objectStore.alterIndex(dbName, baseTblName, name, newIndex);
+ }
+
+ // Partition filtering and partition-event operations: straight delegation.
+ @Override
+ public List<Partition> getPartitionsByFilter(String dbName, String tblName,
+ String filter, short maxParts) throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionsByFilter(dbName, tblName, filter, maxParts);
+ }
+
+ @Override
+ public int getNumPartitionsByFilter(String dbName, String tblName,
+ String filter) throws MetaException, NoSuchObjectException {
+ return objectStore.getNumPartitionsByFilter(dbName, tblName, filter);
+ }
+
+ @Override
+ public int getNumPartitionsByExpr(String dbName, String tblName,
+ byte[] expr) throws MetaException, NoSuchObjectException {
+ return objectStore.getNumPartitionsByExpr(dbName, tblName, expr);
+ }
+
+ @Override
+ public List<Partition> getPartitionsByNames(String dbName, String tblName,
+ List<String> partNames) throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionsByNames(dbName, tblName, partNames);
+ }
+
+ @Override
+ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+ String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+ return objectStore.getPartitionsByExpr(
+ dbName, tblName, expr, defaultPartitionName, maxParts, result);
+ }
+
+ @Override
+ public Table markPartitionForEvent(String dbName, String tblName,
+ Map<String, String> partVals, PartitionEventType evtType)
+ throws MetaException, UnknownTableException, InvalidPartitionException,
+ UnknownPartitionException {
+ return objectStore.markPartitionForEvent(dbName, tblName, partVals, evtType);
+ }
+
+ @Override
+ public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+ Map<String, String> partName, PartitionEventType evtType)
+ throws MetaException, UnknownTableException, InvalidPartitionException,
+ UnknownPartitionException {
+ return objectStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType);
+ }
+
+ // Delegates role creation to the underlying ObjectStore.
+ // Fix: first parameter was misnamed "rowName"; it is the name of the role
+ // being added (renaming a Java parameter is source/binary compatible).
+ @Override
+ public boolean addRole(String roleName, String ownerName) throws InvalidObjectException,
+ MetaException, NoSuchObjectException {
+ return objectStore.addRole(roleName, ownerName);
+ }
+
+ // Role removal and grant/revoke: straight delegation.
+ @Override
+ public boolean removeRole(String roleName)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.removeRole(roleName);
+ }
+
+ @Override
+ public boolean grantRole(Role role, String userName, PrincipalType principalType,
+ String grantor, PrincipalType grantorType, boolean grantOption)
+ throws MetaException, NoSuchObjectException, InvalidObjectException {
+ return objectStore.grantRole(role, userName, principalType, grantor, grantorType,
+ grantOption);
+ }
+
+ @Override
+ public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.revokeRole(role, userName, principalType, grantOption);
+ }
+
+ // Privilege-set getters at each object granularity: straight delegation.
+ @Override
+ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+ List<String> groupNames) throws InvalidObjectException, MetaException {
+ return objectStore.getUserPrivilegeSet(userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+ List<String> groupNames) throws InvalidObjectException, MetaException {
+ return objectStore.getDBPrivilegeSet(dbName, userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
+ String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+ return objectStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
+ String partition, String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+ return objectStore.getPartitionPrivilegeSet(dbName, tableName, partition,
+ userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
+ String partitionName, String columnName, String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+ return objectStore.getColumnPrivilegeSet(dbName, tableName, partitionName,
+ columnName, userName, groupNames);
+ }
+
+ // Per-principal grant listings at each object granularity: straight delegation.
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+ PrincipalType principalType) {
+ return objectStore.listPrincipalGlobalGrants(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+ PrincipalType principalType, String dbName) {
+ return objectStore.listPrincipalDBGrants(principalName, principalType, dbName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName) {
+ return objectStore.listAllTableGrants(principalName, principalType,
+ dbName, tableName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, List<String> partValues,
+ String partName) {
+ return objectStore.listPrincipalPartitionGrants(principalName, principalType,
+ dbName, tableName, partValues, partName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, String columnName) {
+ return objectStore.listPrincipalTableColumnGrants(principalName, principalType,
+ dbName, tableName, columnName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+ String principalName, PrincipalType principalType, String dbName, String tableName,
+ List<String> partVals, String partName, String columnName) {
+ return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType,
+ dbName, tableName, partVals, partName, columnName);
+ }
+
+ // Grant/revoke of privilege bags and role queries: straight delegation.
+ @Override
+ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+ MetaException, NoSuchObjectException {
+ return objectStore.grantPrivileges(privileges);
+ }
+
+ @Override
+ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+ throws InvalidObjectException, MetaException, NoSuchObjectException {
+ return objectStore.revokePrivileges(privileges, grantOption);
+ }
+
+ @Override
+ public Role getRole(String roleName) throws NoSuchObjectException {
+ return objectStore.getRole(roleName);
+ }
+
+ @Override
+ public List<String> listRoleNames() {
+ return objectStore.listRoleNames();
+ }
+
+ @Override
+ public List<Role> listRoles(String principalName, PrincipalType principalType) {
+ return objectStore.listRoles(principalName, principalType);
+ }
+
+ @Override
+ public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+ PrincipalType principalType) {
+ return objectStore.listRolesWithGrants(principalName, principalType);
+ }
+
+ @Override
+ public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+ return objectStore.listRoleMembers(roleName);
+ }
+
+ // Authorization-aware partition getters and event cleanup: straight delegation.
+ @Override
+ public Partition getPartitionWithAuth(String dbName, String tblName,
+ List<String> partVals, String userName, List<String> groupNames)
+ throws MetaException, NoSuchObjectException, InvalidObjectException {
+ return objectStore.getPartitionWithAuth(dbName, tblName, partVals, userName,
+ groupNames);
+ }
+
+ @Override
+ public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
+ short maxParts, String userName, List<String> groupNames)
+ throws MetaException, NoSuchObjectException, InvalidObjectException {
+ return objectStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName,
+ groupNames);
+ }
+
+ @Override
+ public List<String> listPartitionNamesPs(String dbName, String tblName,
+ List<String> partVals, short maxParts)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
+ }
+
+ @Override
+ public List<Partition> listPartitionsPsWithAuth(String dbName, String tblName,
+ List<String> partVals, short maxParts, String userName, List<String> groupNames)
+ throws MetaException, InvalidObjectException, NoSuchObjectException {
+ return objectStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts,
+ userName, groupNames);
+ }
+
+ @Override
+ public long cleanupEvents() {
+ return objectStore.cleanupEvents();
+ }
+
+ // "GrantsAll" listing family (grants across all principals/objects):
+ // straight delegation.
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalDBGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalTableGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalPartitionGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalTableColumnGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+ return objectStore.listGlobalGrantsAll();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
+ return objectStore.listDBGrantsAll(dbName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName,
+ String partitionName, String columnName) {
+ return objectStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
+ return objectStore.listTableGrantsAll(dbName, tableName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName,
+ String partitionName) {
+ return objectStore.listPartitionGrantsAll(dbName, tableName, partitionName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName,
+ String columnName) {
+ return objectStore.listTableColumnGrantsAll(dbName, tableName, columnName);
+ }
+
+ // Table/partition column statistics CRUD: straight delegation.
+ @Override
+ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
+ List<String> colNames) throws MetaException, NoSuchObjectException {
+ return objectStore.getTableColumnStatistics(dbName, tableName, colNames);
+ }
+
+ @Override
+ public boolean deleteTableColumnStatistics(String dbName, String tableName,
+ String colName)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.deleteTableColumnStatistics(dbName, tableName, colName);
+ }
+
+ @Override
+ public boolean deletePartitionColumnStatistics(String dbName, String tableName,
+ String partName, List<String> partVals, String colName)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.deletePartitionColumnStatistics(dbName, tableName, partName,
+ partVals, colName);
+ }
+
+ @Override
+ public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.updateTableColumnStatistics(statsObj);
+ }
+
+ @Override
+ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+ List<String> partVals)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.updatePartitionColumnStatistics(statsObj, partVals);
+ }
+
+ // Delegation token and master-key operations are NOT delegated: all are
+ // inert stubs returning fixed "nothing stored" values (false / "" / empty /
+ // -1 / no-op). This dummy store offers no token support.
+ @Override
+ public boolean addToken(String tokenIdentifier, String delegationToken) {
+ return false;
+ }
+
+ @Override
+ public boolean removeToken(String tokenIdentifier) {
+ return false;
+ }
+
+ @Override
+ public String getToken(String tokenIdentifier) {
+ return "";
+ }
+
+ @Override
+ public List<String> getAllTokenIdentifiers() {
+ return new ArrayList<>();
+ }
+
+ @Override
+ public int addMasterKey(String key) throws MetaException {
+ return -1;
+ }
+
+ @Override
+ public void updateMasterKey(Integer seqNo, String key)
+ throws NoSuchObjectException, MetaException {}
+
+ @Override
+ public boolean removeMasterKey(Integer keySeq) {
+ return false;
+ }
+
+ @Override
+ public String[] getMasterKeys() {
+ return new String[0];
+ }
+
+ // verifySchema is a no-op here; schema-version and partition queries delegate.
+ @Override
+ public void verifySchema() throws MetaException {
+ }
+
+ @Override
+ public String getMetaStoreSchemaVersion() throws MetaException {
+ return objectStore.getMetaStoreSchemaVersion();
+ }
+
+ @Override
+ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
+ objectStore.setMetaStoreSchemaVersion(schemaVersion, comment);
+
+ }
+
+ @Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
+ String tblName, List<String> colNames, List<String> partNames)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionColumnStatistics(dbName, tblName , colNames, partNames);
+ }
+
+ @Override
+ public boolean doesPartitionExist(String dbName, String tableName,
+ List<String> partVals) throws MetaException, NoSuchObjectException {
+ return objectStore.doesPartitionExist(dbName, tableName, partVals);
+ }
+
+ // Bulk partition add/drop: delegation, except the PartitionSpecProxy
+ // overload below.
+ @Override
+ public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+ throws InvalidObjectException, MetaException {
+ return objectStore.addPartitions(dbName, tblName, parts);
+ }
+
+ // NOT delegated: stub that always reports failure (returns false) without
+ // adding anything.
+ @Override
+ public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+ return false;
+ }
+
+ @Override
+ public void dropPartitions(String dbName, String tblName, List<String> partNames)
+ throws MetaException, NoSuchObjectException {
+ objectStore.dropPartitions(dbName, tblName, partNames);
+ }
+
+ // Function (UDF) registry operations: delegation, except where noted.
+ @Override
+ public void createFunction(Function func) throws InvalidObjectException,
+ MetaException {
+ objectStore.createFunction(func);
+ }
+
+ @Override
+ public void alterFunction(String dbName, String funcName, Function newFunction)
+ throws InvalidObjectException, MetaException {
+ objectStore.alterFunction(dbName, funcName, newFunction);
+ }
+
+ @Override
+ public void dropFunction(String dbName, String funcName)
+ throws MetaException, NoSuchObjectException, InvalidObjectException,
+ InvalidInputException {
+ objectStore.dropFunction(dbName, funcName);
+ }
+
+ @Override
+ public Function getFunction(String dbName, String funcName)
+ throws MetaException {
+ return objectStore.getFunction(dbName, funcName);
+ }
+
+ // NOT delegated: always reports an empty function list.
+ @Override
+ public List<Function> getAllFunctions()
+ throws MetaException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getFunctions(String dbName, String pattern)
+ throws MetaException {
+ return objectStore.getFunctions(dbName, pattern);
+ }
+
+ // NOT delegated: unimplemented stub that always returns null.
+ @Override
+ public AggrStats get_aggr_stats_for(String dbName,
+ String tblName, List<String> partNames, List<String> colNames)
+ throws MetaException {
+ return null;
+ }
+
+ // Notification-log operations and cache flush: straight delegation.
+ @Override
+ public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+ return objectStore.getNextNotification(rqst);
+ }
+
+ @Override
+ public void addNotificationEvent(NotificationEvent event) {
+ objectStore.addNotificationEvent(event);
+ }
+
+ @Override
+ public void cleanNotificationEvents(int olderThan) {
+ objectStore.cleanNotificationEvents(olderThan);
+ }
+
+ @Override
+ public CurrentNotificationEventId getCurrentNotificationEventId() {
+ return objectStore.getCurrentNotificationEventId();
+ }
+
+ @Override
+ public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+ return objectStore.getNotificationEventsCount(rqst);
+ }
+
+ @Override
+ public void flushCache() {
+ objectStore.flushCache();
+ }
+
+ // File-metadata operations are NOT delegated: this dummy advertises no
+ // file-metadata support (isFileMetadataSupported() == false) and the
+ // remaining methods are inert stubs.
+ @Override
+ public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+ return null;
+ }
+
+ @Override
+ public void putFileMetadata(
+ List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+ }
+
+ @Override
+ public boolean isFileMetadataSupported() {
+ return false;
+ }
+
+
+ @Override
+ public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+ ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+ }
+
+ // Object counts delegate; getFileMetadataHandler is a null stub
+ // (consistent with file metadata being unsupported above).
+ @Override
+ public int getTableCount() throws MetaException {
+ return objectStore.getTableCount();
+ }
+
+ @Override
+ public int getPartitionCount() throws MetaException {
+ return objectStore.getPartitionCount();
+ }
+
+ @Override
+ public int getDatabaseCount() throws MetaException {
+ return objectStore.getDatabaseCount();
+ }
+
+ @Override
+ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+ return null;
+ }
+
+ @Override
+ public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+ String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<SQLUniqueConstraint> getUniqueConstraints(String db_name, String tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<String> createTableWithConstraints(Table tbl,
+ List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+ List<SQLUniqueConstraint> uniqueConstraints,
+ List<SQLNotNullConstraint> notNullConstraints)
+ throws InvalidObjectException, MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public void dropConstraint(String dbName, String tableName,
+ String constraintName) throws NoSuchObjectException {
+ // TODO Auto-generated method stub
+ }
+
+ @Override
+ public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+ throws InvalidObjectException, MetaException {
+ return null;
+ }
+
+ @Override
+ public List<String> addForeignKeys(List<SQLForeignKey> fks)
+ throws InvalidObjectException, MetaException {
+ return null;
+ }
+
+ @Override
+ public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+ throws InvalidObjectException, MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+ throws InvalidObjectException, MetaException {
+ // Adding not-null constraints is not supported by this dummy store.
+ return null;
+ }
+
+ @Override
+ public Map<String, List<ColumnStatisticsObj>> getColStatsForTablePartitions(String dbName,
+ String tableName) throws MetaException, NoSuchObjectException {
+ // Partition column statistics are not supported by this dummy store.
+ return null;
+ }
+
+ @Override
+ public String getMetastoreDbUuid() throws MetaException {
+ // Deliberately unimplemented; callers must be prepared for the MetaException.
+ throw new MetaException("Get metastore uuid is not implemented");
+ }
+
+ @Override
+ public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.createResourcePlan(resourcePlan, defaultPoolSize);
+ }
+
+ @Override
+ public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+ // Delegates directly to the wrapped ObjectStore.
+ return objectStore.getResourcePlan(name);
+ }
+
+ @Override
+ public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ return objectStore.getAllResourcePlans();
+ }
+
+ @Override
+ public WMFullResourcePlan alterResourcePlan(String name, WMResourcePlan resourcePlan,
+ boolean canActivateDisabled)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ return objectStore.alterResourcePlan(name, resourcePlan, canActivateDisabled);
+ }
+
+ @Override
+ public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ return objectStore.getActiveResourcePlan();
+ }
+
+ @Override
+ public List<String> validateResourcePlan(String name)
+ throws NoSuchObjectException, InvalidObjectException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ return objectStore.validateResourcePlan(name);
+ }
+
+ @Override
+ public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.dropResourcePlan(name);
+ }
+
+ @Override
+ public void createWMTrigger(WMTrigger trigger)
+ throws AlreadyExistsException, MetaException, NoSuchObjectException,
+ InvalidOperationException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.createWMTrigger(trigger);
+ }
+
+ @Override
+ public void alterWMTrigger(WMTrigger trigger)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.alterWMTrigger(trigger);
+ }
+
+ @Override
+ public void dropWMTrigger(String resourcePlanName, String triggerName)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.dropWMTrigger(resourcePlanName, triggerName);
+ }
+
+ @Override
+ public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+ throws NoSuchObjectException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ return objectStore.getTriggersForResourcePlan(resourcePlanName);
+ }
+
+ @Override
+ public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.createPool(pool);
+ }
+
+ @Override
+ public void alterPool(WMPool pool, String poolPath) throws AlreadyExistsException,
+ NoSuchObjectException, InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.alterPool(pool, poolPath);
+ }
+
+ @Override
+ public void dropWMPool(String resourcePlanName, String poolPath)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.dropWMPool(resourcePlanName, poolPath);
+ }
+
+ @Override
+ public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+ throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+ MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.createOrUpdateWMMapping(mapping, update);
+ }
+
+ @Override
+ public void dropWMMapping(WMMapping mapping)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.dropWMMapping(mapping);
+ }
+
+ @Override
+ public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+ InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
+
+ @Override
+ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+ String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+ // Delegates directly to the wrapped ObjectStore.
+ objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
new file mode 100644
index 0000000..d7a40b6
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.List;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import static org.junit.Assert.assertEquals;
+
+
+/**
+ * A wrapper around {@link ObjectStore} that allows us to inject custom behaviour
+ * on to some of the methods for testing.
+ */
+public class InjectableBehaviourObjectStore extends ObjectStore {
+ public InjectableBehaviourObjectStore() {
+ super();
+ }
+
+ /**
+ * A utility class that allows people injecting behaviour to determine if their injections occurred.
+ */
+ public static abstract class BehaviourInjection<T,F>
+ implements com.google.common.base.Function<T,F>{
+ // Set to true by the injected function when the injected code path actually ran.
+ protected boolean injectionPathCalled = false;
+ // Set to true by the injected function when the original (non-injected) path ran instead.
+ protected boolean nonInjectedPathCalled = false;
+
+ // Asserts that the injection flags match the test's expectations.
+ public void assertInjectionsPerformed(
+ boolean expectedInjectionCalled, boolean expectedNonInjectedPathCalled){
+ assertEquals(expectedInjectionCalled, injectionPathCalled);
+ assertEquals(expectedNonInjectedPathCalled, nonInjectedPathCalled);
+ }
+ }
+
+ // Modifier functions applied to the results of the overridden ObjectStore methods.
+ // They default to identity (no behaviour change).
+ // NOTE(review): these statics are mutated from test threads and read from metastore
+ // handler threads without volatile/synchronization — confirm visibility is acceptable
+ // for the tests that use this class.
+ private static com.google.common.base.Function<Table,Table> getTableModifier =
+ com.google.common.base.Functions.identity();
+ private static com.google.common.base.Function<List<String>, List<String>> listPartitionNamesModifier =
+ com.google.common.base.Functions.identity();
+ private static com.google.common.base.Function<NotificationEventResponse, NotificationEventResponse>
+ getNextNotificationModifier = com.google.common.base.Functions.identity();
+
+ // Methods to set/reset getTable modifier; passing null restores identity behaviour.
+ public static void setGetTableBehaviour(com.google.common.base.Function<Table,Table> modifier){
+ getTableModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+ }
+
+ public static void resetGetTableBehaviour(){
+ setGetTableBehaviour(null);
+ }
+
+ // Methods to set/reset listPartitionNames modifier; passing null restores identity behaviour.
+ public static void setListPartitionNamesBehaviour(com.google.common.base.Function<List<String>, List<String>> modifier){
+ listPartitionNamesModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+ }
+
+ public static void resetListPartitionNamesBehaviour(){
+ setListPartitionNamesBehaviour(null);
+ }
+
+ // Methods to set/reset getNextNotification modifier; passing null restores identity behaviour.
+ public static void setGetNextNotificationBehaviour(
+ com.google.common.base.Function<NotificationEventResponse,NotificationEventResponse> modifier){
+ getNextNotificationModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+ }
+
+ public static void resetGetNextNotificationBehaviour(){
+ setGetNextNotificationBehaviour(null);
+ }
+
+ // ObjectStore methods to be overridden with injected behavior.
+ // Each delegates to super and then pipes the result through the current modifier.
+ @Override
+ public Table getTable(String dbName, String tableName) throws MetaException {
+ return getTableModifier.apply(super.getTable(dbName, tableName));
+ }
+
+ @Override
+ public List<String> listPartitionNames(String dbName, String tableName, short max) throws MetaException {
+ return listPartitionNamesModifier.apply(super.listPartitionNames(dbName, tableName, max));
+ }
+
+ @Override
+ public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+ return getNextNotificationModifier.apply(super.getNextNotification(rqst));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/IpAddressListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/IpAddressListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/IpAddressListener.java
new file mode 100644
index 0000000..e7a0d2d
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/IpAddressListener.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.junit.Assert;
+
+/** An implementation for MetaStoreEventListener which checks that the IP Address stored in
+ * HMSHandler matches that of local host, for testing purposes.
+ */
+/**
+ * An implementation of MetaStoreEventListener which checks, for every metastore event it
+ * receives, that the client IP address stored in {@link HMSHandler} matches the address
+ * of localhost. For testing purposes only.
+ */
+public class IpAddressListener extends MetaStoreEventListener {
+
+  private static final String LOCAL_HOST = "localhost";
+
+  public IpAddressListener(Configuration config) {
+    super(config);
+  }
+
+  /**
+   * Asserts that the thread-local IP address recorded by HMSHandler equals the
+   * resolved address of localhost.
+   */
+  private void checkIpAddress() {
+    try {
+      String localhostIp = InetAddress.getByName(LOCAL_HOST).getHostAddress();
+      Assert.assertEquals(localhostIp, HMSHandler.getThreadLocalIpAddress());
+    } catch (UnknownHostException e) {
+      // Fail explicitly rather than assertTrue(msg, false); message now names the
+      // call that actually threw (getByName, not getLocalHost).
+      Assert.fail("InetAddress.getByName threw an exception: " + e.getMessage());
+    }
+  }
+
+  @Override
+  public void onAddPartition(AddPartitionEvent partition) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onCreateDatabase(CreateDatabaseEvent db) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onCreateTable(CreateTableEvent table) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onDropDatabase(DropDatabaseEvent db) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partition) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent table) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onAlterTable(AlterTableEvent event) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onAlterPartition(AlterPartitionEvent event) throws MetaException {
+    checkIpAddress();
+  }
+
+  @Override
+  public void onLoadPartitionDone(LoadPartitionDoneEvent partEvent) throws MetaException {
+    checkIpAddress();
+  }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
new file mode 100644
index 0000000..380f3a1
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Static utilities for tests: start an embedded metastore Thrift service (optionally with
+ * retries), wait for it to accept connections, find free server ports, and configure
+ * standalone mode. Never instantiated.
+ */
+public class MetaStoreTestUtils {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MetaStoreTestUtils.class);
+  public static final int RETRY_COUNT = 10;
+
+  /** Starts a metastore on a free port with the default auth bridge and conf; returns the port. */
+  public static int startMetaStore() throws Exception {
+    return MetaStoreTestUtils.startMetaStore(HadoopThriftAuthBridge.getBridge(), null);
+  }
+
+  /** Starts a metastore on a freshly chosen free port and returns that port. */
+  public static int startMetaStore(final HadoopThriftAuthBridge bridge, Configuration conf)
+      throws Exception {
+    int port = MetaStoreTestUtils.findFreePort();
+    MetaStoreTestUtils.startMetaStore(port, bridge, conf);
+    return port;
+  }
+
+  /** Starts a metastore with the given conf and the default auth bridge; returns the port. */
+  public static int startMetaStore(Configuration conf) throws Exception {
+    return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
+  }
+
+  /** Starts a metastore on the given port with a default conf. */
+  public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {
+    MetaStoreTestUtils.startMetaStore(port, bridge, null);
+  }
+
+  /**
+   * Starts the metastore Thrift server on a daemon thread and blocks until it accepts
+   * connections on the given port.
+   *
+   * @param port   port the server should listen on
+   * @param bridge auth bridge to use
+   * @param conf   configuration; a fresh metastore conf is created when null
+   * @throws Exception if the server never becomes reachable
+   */
+  public static void startMetaStore(final int port,
+      final HadoopThriftAuthBridge bridge, Configuration conf)
+      throws Exception {
+    if (conf == null) {
+      conf = MetastoreConf.newMetastoreConf();
+    }
+    final Configuration finalConf = conf;
+    Thread thread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          HiveMetaStore.startMetaStore(port, bridge, finalConf);
+        } catch (Throwable e) {
+          // The server thread must not die silently; the caller's readiness probe
+          // below will surface a failure if the server never comes up.
+          LOG.error("Metastore Thrift Server threw an exception...", e);
+        }
+      }
+    });
+    thread.setDaemon(true);
+    thread.start();
+    MetaStoreTestUtils.loopUntilHMSReady(port);
+  }
+
+  /** Starts a metastore with retries using a default conf; returns the port used. */
+  public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge) throws Exception {
+    return MetaStoreTestUtils.startMetaStoreWithRetry(bridge, null);
+  }
+
+  /** Starts a metastore with retries using the default auth bridge; returns the port used. */
+  public static int startMetaStoreWithRetry(Configuration conf) throws Exception {
+    return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+  }
+
+  /** Starts a metastore with retries using all defaults; returns the port used. */
+  public static int startMetaStoreWithRetry() throws Exception {
+    return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), null);
+  }
+
+  /**
+   * Starts a metastore, retrying on a new free port (up to {@link #RETRY_COUNT} times)
+   * if the chosen port turns out to be unusable.
+   *
+   * @return the port the metastore was started on
+   * @throws Exception the last ConnectException if every attempt failed
+   */
+  public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge, Configuration conf)
+      throws Exception {
+    Exception metaStoreException = null;
+    int metaStorePort = 0;
+
+    for (int tryCount = 0; tryCount < MetaStoreTestUtils.RETRY_COUNT; tryCount++) {
+      try {
+        metaStorePort = MetaStoreTestUtils.findFreePort();
+        MetaStoreTestUtils.startMetaStore(metaStorePort, bridge, conf);
+        return metaStorePort;
+      } catch (ConnectException ce) {
+        metaStoreException = ce;
+      }
+    }
+
+    throw metaStoreException;
+  }
+
+  /**
+   * A simple connect test to make sure that the metastore is up. Polls the port once a
+   * second for roughly a minute before giving up.
+   *
+   * @throws Exception the last connection failure if the server never became reachable
+   */
+  public static void loopUntilHMSReady(int port) throws Exception {
+    int retries = 0;
+    Exception exc = null;
+    while (true) {
+      try {
+        Socket socket = new Socket();
+        socket.connect(new InetSocketAddress(port), 5000);
+        socket.close();
+        return;
+      } catch (Exception e) {
+        if (retries++ > 60) { //give up
+          exc = e;
+          break;
+        }
+        Thread.sleep(1000);
+      }
+    }
+    // Something is preventing the metastore from starting; dump all thread stacks
+    // for debugging before propagating the failure.
+    LOG.error("Unable to connect to metastore server: " + exc.getMessage());
+    LOG.info("Printing all thread stack traces for debugging before throwing exception.");
+    LOG.info(MetaStoreTestUtils.getAllThreadStacksAsString());
+    throw exc;
+  }
+
+  /** Returns the name, state and stack trace of every live thread, for diagnostics. */
+  public static String getAllThreadStacksAsString() {
+    Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
+    StringBuilder sb = new StringBuilder();
+    for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
+      Thread t = entry.getKey();
+      sb.append(System.lineSeparator());
+      sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState());
+      MetaStoreTestUtils.addStackString(entry.getValue(), sb);
+    }
+    return sb.toString();
+  }
+
+  /** Appends one stack trace, one frame per line, to the given builder. */
+  public static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) {
+    sb.append(System.lineSeparator());
+    for (StackTraceElement stackElem : stackElems) {
+      sb.append(stackElem).append(System.lineSeparator());
+    }
+  }
+
+  /**
+   * Finds a free port on the machine.
+   *
+   * @return a port number that was free at the time of the call
+   * @throws IOException if no port could be bound
+   */
+  public static int findFreePort() throws IOException {
+    // try-with-resources: the original leaked the socket if getLocalPort() threw.
+    try (ServerSocket socket = new ServerSocket(0)) {
+      return socket.getLocalPort();
+    }
+  }
+
+  /**
+   * Finds a free port on the machine, but allow the ability to specify a port
+   * number to not use, no matter what.
+   */
+  public static int findFreePortExcepting(int portToExclude) throws IOException {
+    try (ServerSocket socket1 = new ServerSocket(0);
+         ServerSocket socket2 = new ServerSocket(0)) {
+      if (socket1.getLocalPort() != portToExclude) {
+        return socket1.getLocalPort();
+      }
+      // If we're here, then socket1.getLocalPort was the port to exclude.
+      // Since both sockets were open together at a point in time, we're
+      // guaranteed that socket2.getLocalPort() is not the same.
+      return socket2.getLocalPort();
+    }
+  }
+
+  /**
+   * Setup a configuration file for standalone mode. There are a few config variables that have
+   * defaults that require parts of Hive that aren't present in standalone mode. This method
+   * sets them to something that will work without the rest of Hive.
+   * (Name kept as-is — "Standlone" — for compatibility with existing callers.)
+   * @param conf Configuration object
+   */
+  public static void setConfForStandloneMode(Configuration conf) {
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS,
+        EventCleanerTask.class.getName());
+  }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
new file mode 100644
index 0000000..346fd98
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+
+import java.util.List;
+
+/**
+ * Test Mock-out for PartitionExpressionForMetastore.
+ */
+public class MockPartitionExpressionForMetastore implements PartitionExpressionProxy {
+ @Override
+ public String convertExprToFilter(byte[] expr) throws MetaException {
+ // Expression conversion is not exercised by tests using this mock.
+ return null;
+ }
+
+ @Override
+ public boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
+ byte[] expr, String defaultPartitionName,
+ List<String> partitionNames) throws MetaException {
+ // Never filters anything; partitionNames is left untouched.
+ return false;
+ }
+
+ @Override
+ public FileMetadataExprType getMetadataType(String inputFormat) {
+ // File metadata is not supported by this mock.
+ return null;
+ }
+
+ @Override
+ public SearchArgument createSarg(byte[] expr) {
+ // Search arguments are not supported by this mock.
+ return null;
+ }
+
+ @Override
+ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+ // File format proxies are not supported by this mock.
+ return null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java
new file mode 100644
index 0000000..ba86e05
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.security.Permission;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.thrift.TException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.Rule;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.Assert.*;
+import org.junit.Before;
+
+/**
+ * Tests for {@code HiveMetaStoreClient#getMetaConf}: default values, empty-string
+ * defaults, server-side overrides, and rejection of unknown keys. Starts a real
+ * metastore Thrift server once for the whole class.
+ */
+public class TestHiveMetaStoreGetMetaConf {
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStoreGetMetaConf.class);
+  private static Configuration conf;
+  private static SecurityManager securityManager;
+
+  private HiveMetaStoreClient hmsc;
+
+  /** SecurityManager that permits everything except System.exit(), which it turns into an exception. */
+  public static class NoExitSecurityManager extends SecurityManager {
+
+    @Override
+    public void checkPermission(Permission perm) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+      super.checkExit(status);
+      throw new RuntimeException("System.exit() was called. Raising exception.");
+    }
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    // NOTE(review): this only restores the security manager; the metastore server
+    // itself runs on a daemon thread and is not explicitly stopped — confirm this
+    // is intentional.
+    LOG.info("Shutting down metastore.");
+    System.setSecurityManager(securityManager);
+  }
+
+  /** Starts the metastore server and prepares the client configuration. */
+  @BeforeClass
+  public static void startMetaStoreServer() throws Exception {
+
+    securityManager = System.getSecurityManager();
+    System.setSecurityManager(new NoExitSecurityManager());
+    Configuration metastoreConf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setClass(metastoreConf, ConfVars.EXPRESSION_PROXY_CLASS,
+        MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
+    // TRY_DIRECT_SQL_DDL is overridden here so testGetMetaConfOverridden can observe it.
+    MetastoreConf.setBoolVar(metastoreConf, ConfVars.TRY_DIRECT_SQL_DDL, false);
+    MetaStoreTestUtils.setConfForStandloneMode(metastoreConf);
+    int msPort = MetaStoreUtils.startMetaStore(metastoreConf);
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + msPort);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 10);
+  }
+
+  /** Opens a fresh client before each test. */
+  @Before
+  public void setup() throws MetaException {
+    hmsc = new HiveMetaStoreClient(conf);
+  }
+
+  /** Closes the client after each test. */
+  @After
+  public void closeClient() {
+    if (hmsc != null) {
+      hmsc.close();
+    }
+  }
+
+  /** An un-overridden key returns its declared default value. */
+  @Test
+  public void testGetMetaConfDefault() throws TException {
+    ConfVars metaConfVar = ConfVars.TRY_DIRECT_SQL;
+    String expected = metaConfVar.getDefaultVal().toString();
+    String actual = hmsc.getMetaConf(metaConfVar.toString());
+    assertEquals(expected, actual);
+  }
+
+  /** A key whose default is the empty string returns "". */
+  @Test
+  public void testGetMetaConfDefaultEmptyString() throws TException {
+    ConfVars metaConfVar = ConfVars.PARTITION_NAME_WHITELIST_PATTERN;
+    String expected = "";
+    String actual = hmsc.getMetaConf(metaConfVar.toString());
+    assertEquals(expected, actual);
+  }
+
+  /** A key overridden in the server conf returns the overridden value. */
+  @Test
+  public void testGetMetaConfOverridden() throws TException {
+    ConfVars metaConfVar = ConfVars.TRY_DIRECT_SQL_DDL;
+    String expected = "false";
+    String actual = hmsc.getMetaConf(metaConfVar.toString());
+    assertEquals(expected, actual);
+  }
+
+  /** An unknown key is rejected with a MetaException. (Renamed: fixed "Preperty" typo.) */
+  @Test
+  public void testGetMetaConfUnknownProperty() throws TException {
+    String unknownPropertyName = "hive.meta.foo.bar";
+    thrown.expect(MetaException.class);
+    thrown.expectMessage("Invalid configuration key " + unknownPropertyName);
+    hmsc.getMetaConf(unknownPropertyName);
+  }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
new file mode 100644
index 0000000..57e5a41
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
@@ -0,0 +1,407 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.security.Permission;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test to check PartitionSpec support in HiveMetaStore.
+ */
+public class TestHiveMetaStorePartitionSpecs {
+
+ private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStorePartitionSpecs.class);
+ private static int msPort;
+ private static Configuration conf;
+ private static SecurityManager securityManager;
+
+ /**
+ * SecurityManager that permits everything except JVM exit, so a
+ * System.exit() inside the embedded metastore fails the test instead of
+ * killing the test JVM.
+ */
+ public static class NoExitSecurityManager extends SecurityManager {
+
+ @Override
+ public void checkPermission(Permission perm) {
+ // allow anything.
+ }
+
+ @Override
+ public void checkPermission(Permission perm, Object context) {
+ // allow anything.
+ }
+
+ @Override
+ public void checkExit(int status) {
+ super.checkExit(status);
+ throw new RuntimeException("System.exit() was called. Raising exception. ");
+ }
+ }
+
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ LOG.info("Shutting down metastore.");
+ // Restore the original SecurityManager before cleanup.
+ System.setSecurityManager(securityManager);
+
+ HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf);
+ hmsc.dropDatabase(dbName, true, true, true);
+ }
+
+ /**
+ * Starts an embedded metastore server (with a mock partition-expression
+ * proxy) and builds the client configuration pointing at it.
+ */
+ @BeforeClass
+ public static void startMetaStoreServer() throws Exception {
+
+ Configuration metastoreConf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setClass(metastoreConf, ConfVars.EXPRESSION_PROXY_CLASS,
+ MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
+ MetaStoreTestUtils.setConfForStandloneMode(metastoreConf);
+ msPort = MetaStoreTestUtils.startMetaStore(metastoreConf);
+ securityManager = System.getSecurityManager();
+ System.setSecurityManager(new NoExitSecurityManager());
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + msPort);
+ MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
+ MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
+ }
+
+ private static String dbName = "testpartitionspecs_db";
+ private static String tableName = "testpartitionspecs_table";
+ private static int nDates = 10;
+ private static String datePrefix = "2014010";
+
+ /**
+ * Creates the test table (partitioned on dt/blurb), optionally with
+ * partition-spec grouping enabled via table parameters.
+ */
+ private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitionGrouping) throws Exception {
+
+ List<FieldSchema> columns = new ArrayList<>();
+ columns.add(new FieldSchema("foo", "string", ""));
+ columns.add(new FieldSchema("bar", "string", ""));
+
+ List<FieldSchema> partColumns = new ArrayList<>();
+ partColumns.add(new FieldSchema("dt", "string", ""));
+ partColumns.add(new FieldSchema("blurb", "string", ""));
+
+ SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe",
+ "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", new HashMap<>());
+
+ StorageDescriptor storageDescriptor
+ = new StorageDescriptor(columns, null,
+ "org.apache.hadoop.hive.ql.io.RCFileInputFormat",
+ "org.apache.hadoop.hive.ql.io.RCFileOutputFormat",
+ false, 0, serdeInfo, null, null, null);
+
+ Map<String, String> tableParameters = new HashMap<>();
+ tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false");
+ Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", "");
+
+ hmsc.createTable(table);
+ Assert.assertTrue("Table " + dbName + "." + tableName + " does not exist",
+ hmsc.tableExists(dbName, tableName));
+
+ }
+
+ /** Drops (cascade) and recreates the test database, for test isolation. */
+ private static void clearAndRecreateDB(HiveMetaStoreClient hmsc) throws Exception {
+ hmsc.dropDatabase(dbName,
+ true, // Delete data.
+ true, // Ignore unknownDB.
+ true // Cascade.
+ );
+
+ hmsc.createDatabase(new Database(dbName,
+ "", // Description.
+ null, // Location.
+ null // Parameters.
+ ));
+ }
+
+ // Get partition-path. For grid='XYZ', place the partition outside the table-path.
+ private static String getPartitionPath(Table table, List<String> partValues) {
+
+ return partValues.get(1).equalsIgnoreCase("isLocatedOutsideTablePath")? // i.e. Is the partition outside the table-dir?
+ table.getSd().getLocation().replace(table.getTableName(), "location_outside_" + table.getTableName())
+ + "_" + partValues.get(0) + "_" + partValues.get(1)
+ : null ; // Use defaults... Partitions are put in the table directory.
+
+ }
+
+ /** Adds nDates partitions per blurb; location depends on the blurb value. */
+ private static void populatePartitions(HiveMetaStoreClient hmsc, Table table, List<String> blurbs) throws Exception {
+ for (int i=0; i< nDates; ++i) {
+ for (String blurb : blurbs) {
+ StorageDescriptor sd = new StorageDescriptor(table.getSd());
+ // Add partitions located in the table-directory (i.e. default).
+ List<String> values = Arrays.asList(datePrefix + i, blurb);
+ sd.setLocation(getPartitionPath(table, values));
+ hmsc.add_partition(new Partition(values, dbName, tableName, 0, 0, sd, null));
+ }
+ }
+ }
+
+ /**
+ * Exercises listPartitionSpecs() and listPartitionSpecsByFilter() against a
+ * freshly populated table, checking that all partitions round-trip.
+ */
+ private void testGetPartitionSpecs(boolean enablePartitionGrouping) {
+ try {
+ HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf);
+ clearAndRecreateDB(hmsc);
+ createTable(hmsc, enablePartitionGrouping);
+ Table table = hmsc.getTable(dbName, tableName);
+ populatePartitions(hmsc, table, Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath"));
+
+ PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
+ Assert.assertEquals("Unexpected number of partitions.", nDates * 2, partitionSpecProxy.size());
+
+ Map<String, List<String>> locationToDateMap = new HashMap<>();
+ locationToDateMap.put("isLocatedInTablePath", new ArrayList<>());
+ locationToDateMap.put("isLocatedOutsideTablePath", new ArrayList<>());
+ PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
+
+ while (iterator.hasNext()) {
+ Partition partition = iterator.next();
+ locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
+ }
+
+ List<String> expectedDates = new ArrayList<>(nDates);
+ for (int i=0; i<nDates; ++i) {
+ expectedDates.add(datePrefix + i);
+ }
+
+ Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedInTablePath").toArray());
+ Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
+
+ // Filter on blurb: only the outside-table-path partitions must be returned.
+ partitionSpecProxy = hmsc.listPartitionSpecsByFilter(dbName, tableName, "blurb = \"isLocatedOutsideTablePath\"", -1);
+ locationToDateMap.get("isLocatedInTablePath").clear();
+ locationToDateMap.get("isLocatedOutsideTablePath").clear();
+ iterator = partitionSpecProxy.getPartitionIterator();
+
+ while (iterator.hasNext()) {
+ Partition partition = iterator.next();
+ locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
+ }
+
+ Assert.assertEquals("Unexpected date-values.", 0, locationToDateMap.get("isLocatedInTablePath").size());
+ Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
+
+ }
+ catch (Throwable t) {
+ // LOG.error already records the full stack trace; fail() is the idiomatic
+ // way to abort (printStackTrace + assertTrue(false) removed).
+ LOG.error("Unexpected Exception!", t);
+ Assert.fail("Unexpected Exception!");
+ }
+ }
+
+ /**
+ * Test for HiveMetaStoreClient.listPartitionSpecs() and HiveMetaStoreClient.listPartitionSpecsByFilter().
+ * Check behaviour with and without Partition-grouping enabled.
+ */
+ @Test
+ public void testGetPartitionSpecs_WithAndWithoutPartitionGrouping() {
+ testGetPartitionSpecs(true);
+ testGetPartitionSpecs(false);
+ }
+
+
+ /**
+ * Test to confirm that partitions can be added using PartitionSpecs.
+ */
+ @Test
+ public void testAddPartitions() {
+ try {
+ // Create source table.
+ HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf);
+ clearAndRecreateDB(hmsc);
+ createTable(hmsc, true);
+ Table table = hmsc.getTable(dbName, tableName);
+ populatePartitions(hmsc, table, Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath"));
+
+ // Clone the table,
+ String targetTableName = "cloned_" + tableName;
+ Table targetTable = new Table(table);
+ targetTable.setTableName(targetTableName);
+ StorageDescriptor targetTableSd = new StorageDescriptor(targetTable.getSd());
+ targetTableSd.setLocation(
+ targetTableSd.getLocation().replace( tableName, targetTableName));
+ hmsc.createTable(targetTable);
+
+ // Get partition-list from source.
+ PartitionSpecProxy partitionsForAddition
+ = hmsc.listPartitionSpecsByFilter(dbName, tableName, "blurb = \"isLocatedInTablePath\"", -1);
+ partitionsForAddition.setTableName(targetTableName);
+ partitionsForAddition.setRootLocation(targetTableSd.getLocation());
+
+ Assert.assertEquals("Unexpected number of partitions added. ",
+ partitionsForAddition.size(), hmsc.add_partitions_pspec(partitionsForAddition));
+
+ // Check that the added partitions are as expected.
+ PartitionSpecProxy clonedPartitions = hmsc.listPartitionSpecs(dbName, targetTableName, -1);
+ Assert.assertEquals("Unexpected number of partitions returned. ",
+ partitionsForAddition.size(), clonedPartitions.size());
+
+ PartitionSpecProxy.PartitionIterator sourceIterator = partitionsForAddition.getPartitionIterator(),
+ targetIterator = clonedPartitions.getPartitionIterator();
+
+ while (targetIterator.hasNext()) {
+ Partition sourcePartition = sourceIterator.next(),
+ targetPartition = targetIterator.next();
+ Assert.assertEquals("Mismatched values.",
+ sourcePartition.getValues(), targetPartition.getValues());
+ Assert.assertEquals("Mismatched locations.",
+ sourcePartition.getSd().getLocation(), targetPartition.getSd().getLocation());
+ }
+ }
+ catch (Throwable t) {
+ LOG.error("Unexpected Exception!", t);
+ Assert.fail("Unexpected Exception!");
+ }
+ }
+
+ /**
+ * Test to confirm that Partition-grouping behaves correctly when Table-schemas evolve.
+ * Partitions must be grouped by location and schema.
+ */
+ @Test
+ public void testFetchingPartitionsWithDifferentSchemas() {
+ try {
+ // Create source table.
+ HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf);
+ clearAndRecreateDB(hmsc);
+ createTable(hmsc, true);
+ Table table = hmsc.getTable(dbName, tableName);
+ populatePartitions(hmsc,
+ table,
+ Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath") // Blurb list.
+ );
+
+ // Modify table schema. Add columns.
+ List<FieldSchema> fields = table.getSd().getCols();
+ fields.add(new FieldSchema("goo", "string", "Entirely new column. Doesn't apply to older partitions."));
+ table.getSd().setCols(fields);
+ hmsc.alter_table(dbName, tableName, table);
+ // Check that the change stuck.
+ table = hmsc.getTable(dbName,tableName);
+ Assert.assertEquals("Unexpected number of table columns.",
+ 3, table.getSd().getColsSize());
+
+ // Add partitions with new schema.
+ // Mark Partitions with new schema with different blurb.
+ populatePartitions(hmsc, table, Arrays.asList("hasNewColumn"));
+
+ // Retrieve *all* partitions from the table.
+ PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
+ Assert.assertEquals("Unexpected number of partitions.", nDates * 3, partitionSpecProxy.size());
+
+ // Confirm grouping.
+ Assert.assertTrue("Unexpected type of PartitionSpecProxy.", partitionSpecProxy instanceof CompositePartitionSpecProxy);
+ CompositePartitionSpecProxy compositePartitionSpecProxy = (CompositePartitionSpecProxy)partitionSpecProxy;
+ List<PartitionSpec> partitionSpecs = compositePartitionSpecProxy.toPartitionSpec();
+ Assert.assertTrue("PartitionSpec[0] should have been a SharedSDPartitionSpec.",
+ partitionSpecs.get(0).isSetSharedSDPartitionSpec());
+ Assert.assertEquals("PartitionSpec[0] should use the table-path as the common root location. ",
+ table.getSd().getLocation(), partitionSpecs.get(0).getRootPath());
+ Assert.assertTrue("PartitionSpec[1] should have been a SharedSDPartitionSpec.",
+ partitionSpecs.get(1).isSetSharedSDPartitionSpec());
+ Assert.assertEquals("PartitionSpec[1] should use the table-path as the common root location. ",
+ table.getSd().getLocation(), partitionSpecs.get(1).getRootPath());
+ Assert.assertTrue("PartitionSpec[2] should have been a ListComposingPartitionSpec.",
+ partitionSpecs.get(2).isSetPartitionList());
+
+ // Categorize the partitions returned, and confirm that all partitions are accounted for.
+ PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
+ Map<String, List<Partition>> blurbToPartitionList = new HashMap<>(3);
+ while (iterator.hasNext()) {
+
+ Partition partition = iterator.next();
+ String blurb = partition.getValues().get(1);
+
+ if (!blurbToPartitionList.containsKey(blurb)) {
+ blurbToPartitionList.put(blurb, new ArrayList<>(nDates));
+ }
+
+ blurbToPartitionList.get(blurb).add(partition);
+
+ } // </Classification>
+
+ // All partitions with blurb="isLocatedOutsideTablePath" should have 2 columns,
+ // and must have locations outside the table directory.
+ for (Partition partition : blurbToPartitionList.get("isLocatedOutsideTablePath")) {
+ Assert.assertEquals("Unexpected number of columns.", 2, partition.getSd().getCols().size());
+ Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
+ Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
+ String partitionLocation = partition.getSd().getLocation();
+ String tableLocation = table.getSd().getLocation();
+ Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
+ "Partition should have been outside table location: " + tableLocation,
+ !partitionLocation.startsWith(tableLocation));
+ }
+
+ // All partitions with blurb="isLocatedInTablePath" should have 2 columns,
+ // and must have locations within the table directory.
+ for (Partition partition : blurbToPartitionList.get("isLocatedInTablePath")) {
+ Assert.assertEquals("Unexpected number of columns.", 2, partition.getSd().getCols().size());
+ Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
+ Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
+ String partitionLocation = partition.getSd().getLocation();
+ String tableLocation = table.getSd().getLocation();
+ Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
+ "Partition should have been within table location: " + tableLocation,
+ partitionLocation.startsWith(tableLocation));
+ }
+
+ // All partitions with blurb="hasNewColumn" were added after the table schema changed,
+ // and must have 3 columns. Also, the partition locations must lie within the table directory.
+ for (Partition partition : blurbToPartitionList.get("hasNewColumn")) {
+ Assert.assertEquals("Unexpected number of columns.", 3, partition.getSd().getCols().size());
+ Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
+ Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
+ Assert.assertEquals("Unexpected third column.", "goo", partition.getSd().getCols().get(2).getName());
+ String partitionLocation = partition.getSd().getLocation();
+ String tableLocation = table.getSd().getLocation();
+ Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
+ "Partition should have been within table location: " + tableLocation,
+ partitionLocation.startsWith(tableLocation));
+ }
+
+ }
+ catch (Throwable t) {
+ LOG.error("Unexpected Exception!", t);
+ Assert.fail("Unexpected Exception!");
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
new file mode 100644
index 0000000..1489975
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test long running request timeout functionality in MetaStore Server
+ * HiveMetaStore.HMSHandler.create_database() is used to simulate a long running method.
+ */
+public class TestHiveMetaStoreTimeout {
+ protected static HiveMetaStoreClient client;
+ protected static Configuration conf;
+ protected static Warehouse warehouse;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ // Enable the server-side test hook that delays create_database by
+ // TEST_TIMEOUT_VALUE milliseconds.
+ HiveMetaStore.TEST_TIMEOUT_ENABLED = true;
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
+ MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
+ MetastoreConf.setTimeVar(conf, ConfVars.CLIENT_SOCKET_TIMEOUT, 1000,
+ TimeUnit.MILLISECONDS);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ warehouse = new Warehouse(conf);
+ client = new HiveMetaStoreClient(conf);
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ HiveMetaStore.TEST_TIMEOUT_ENABLED = false;
+ try {
+ client.close();
+ } catch (Throwable e) {
+ System.err.println("Unable to close metastore");
+ System.err.println(StringUtils.stringifyException(e));
+ throw e;
+ }
+ }
+
+ /** Calls create_database and fails the test if it times out. */
+ private void createDatabaseExpectingSuccess(Database db) throws Exception {
+ try {
+ client.createDatabase(db);
+ } catch (MetaException e) {
+ Assert.fail("should not throw timeout exception: " + e.getMessage());
+ }
+ }
+
+ /** Calls create_database and fails the test unless it times out. */
+ private void createDatabaseExpectingTimeout(Database db) throws Exception {
+ try {
+ client.createDatabase(db);
+ Assert.fail("should throw timeout exception.");
+ } catch (MetaException e) {
+ Assert.assertTrue("unexpected MetaException", e.getMessage().contains("Timeout when " +
+ "executing method: create_database"));
+ }
+ }
+
+ @Test
+ public void testNoTimeout() throws Exception {
+ // Simulated work (250ms) is well under the configured 1s timeout.
+ HiveMetaStore.TEST_TIMEOUT_VALUE = 250;
+
+ String dbName = "db";
+ client.dropDatabase(dbName, true, true);
+
+ Database db = new Database();
+ db.setName(dbName);
+ createDatabaseExpectingSuccess(db);
+
+ client.dropDatabase(dbName, true, true);
+ }
+
+ @Test
+ public void testTimeout() throws Exception {
+ // Simulated work (2s) exceeds the configured 1s timeout.
+ HiveMetaStore.TEST_TIMEOUT_VALUE = 2 * 1000;
+
+ String dbName = "db";
+ client.dropDatabase(dbName, true, true);
+
+ Database db = new Database();
+ db.setName(dbName);
+ createDatabaseExpectingTimeout(db);
+
+ // restore
+ HiveMetaStore.TEST_TIMEOUT_VALUE = 1;
+ }
+
+ @Test
+ public void testResetTimeout() throws Exception {
+ HiveMetaStore.TEST_TIMEOUT_VALUE = 250;
+ String dbName = "db";
+
+ // no timeout before reset
+ client.dropDatabase(dbName, true, true);
+ Database db = new Database();
+ db.setName(dbName);
+ createDatabaseExpectingSuccess(db);
+ client.dropDatabase(dbName, true, true);
+
+ // reset: lengthen the simulated work and shrink the timeout via setMetaConf.
+ HiveMetaStore.TEST_TIMEOUT_VALUE = 2000;
+ client.setMetaConf(ConfVars.CLIENT_SOCKET_TIMEOUT.getVarname(), "1s");
+
+ // timeout after reset
+ createDatabaseExpectingTimeout(db);
+
+ // restore
+ client.dropDatabase(dbName, true, true);
+ client.setMetaConf(ConfVars.CLIENT_SOCKET_TIMEOUT.getVarname(), "10s");
+ }
+}
\ No newline at end of file