You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by mo...@apache.org on 2016/11/14 17:12:14 UTC
[2/2] hive git commit: HIVE-13966: DbNotificationListener: can lose
DDL operation notifications (Mohit Sabharwal reviewed by Chaoyu Tang)
HIVE-13966: DbNotificationListener: can lose DDL operation notifications (Mohit Sabharwal reviewed by Chaoyu Tang)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3918a639
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3918a639
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3918a639
Branch: refs/heads/master
Commit: 3918a639f0ae24b1b7d8cedb08613a37d266b78b
Parents: 739ac3a
Author: Mohit Sabharwal <mo...@cloudera.com>
Authored: Mon Nov 14 11:57:52 2016 -0500
Committer: Mohit Sabharwal <mo...@cloudera.com>
Committed: Mon Nov 14 11:57:52 2016 -0500
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 60 +-
.../listener/DummyRawStoreFailEvent.java | 894 +++++++++++++++++++
.../listener/TestDbNotificationListener.java | 100 ++-
.../metastore/TestMetaStoreEventListener.java | 5 +-
.../hadoop/hive/metastore/AlterHandler.java | 116 ++-
.../hadoop/hive/metastore/HiveAlterHandler.java | 177 +++-
.../hadoop/hive/metastore/HiveMetaStore.java | 408 +++++++--
7 files changed, 1609 insertions(+), 151 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/3918a639/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3a65c80..a818750 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -18,6 +18,31 @@
package org.apache.hadoop.hive.conf;
+import com.google.common.base.Joiner;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
+import org.apache.hadoop.hive.conf.Validator.PatternSet;
+import org.apache.hadoop.hive.conf.Validator.RangeValidator;
+import org.apache.hadoop.hive.conf.Validator.RatioValidator;
+import org.apache.hadoop.hive.conf.Validator.SizeValidator;
+import org.apache.hadoop.hive.conf.Validator.StringSet;
+import org.apache.hadoop.hive.conf.Validator.TimeValidator;
+import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;
+import org.apache.hadoop.hive.shims.Utils;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Shell;
+import org.apache.hive.common.HiveCompat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.LoginException;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
@@ -43,32 +68,6 @@ import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import javax.security.auth.login.LoginException;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
-import org.apache.hadoop.hive.conf.Validator.PatternSet;
-import org.apache.hadoop.hive.conf.Validator.RangeValidator;
-import org.apache.hadoop.hive.conf.Validator.RatioValidator;
-import org.apache.hadoop.hive.conf.Validator.SizeValidator;
-import org.apache.hadoop.hive.conf.Validator.StringSet;
-import org.apache.hadoop.hive.conf.Validator.TimeValidator;
-import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;
-import org.apache.hadoop.hive.shims.Utils;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell;
-import org.apache.hive.common.HiveCompat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Joiner;
-
/**
* Hive Configuration.
*/
@@ -248,6 +247,7 @@ public class HiveConf extends Configuration {
HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
+ HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
HiveConf.ConfVars.METASTORE_FILTER_HOOK,
@@ -767,7 +767,13 @@ public class HiveConf extends Configuration {
"An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
"List of comma separated listeners for metastore events."),
- METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "", ""),
+ METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "",
+ "A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
+ " interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " +
+ "Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."),
+ METASTORE_TRANSACTIONAL_EVENT_LISTENERS("hive.metastore.transactional.event.listeners", "",
+ "A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" +
+ " interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."),
METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
new TimeValidator(TimeUnit.SECONDS),
"time after which events will be removed from the database listener queue"),
http://git-wip-us.apache.org/repos/asf/hive/blob/3918a639/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
new file mode 100644
index 0000000..4a7801b
--- /dev/null
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -0,0 +1,894 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.hcatalog.listener;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.FileMetadataHandler;
+import org.apache.hadoop.hive.metastore.ObjectStore;
+import org.apache.hadoop.hive.metastore.RawStore;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.thrift.TException;
+
+/**
+ * An implementation {@link org.apache.hadoop.hive.metastore.RawStore}
+ * with the ability to fail metastore events for the purpose of testing.
+ * Events are expected to succeed by default and simply delegate to an
+ * embedded ObjectStore object. The behavior can be changed based on
+ * a flag by calling setEventSucceed().
+ *
+ * Ideally, we should have just extended ObjectStore instead of using
+ * delegation. However, since HiveMetaStore uses a Proxy, this class must
+ * not inherit from any other class.
+ */
+public class DummyRawStoreFailEvent implements RawStore, Configurable {
+
+ private final ObjectStore objectStore;
+ public DummyRawStoreFailEvent() {
+ objectStore = new ObjectStore();
+ }
+
+ private static boolean shouldEventSucceed = true;
+ public static void setEventSucceed(boolean flag) {
+ shouldEventSucceed = flag;
+ }
+
+ @Override
+ public boolean commitTransaction() {
+ return objectStore.commitTransaction();
+ }
+
+ @Override
+ public Configuration getConf() {
+ return objectStore.getConf();
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ objectStore.setConf(conf);
+ }
+
+ @Override
+ public void shutdown() {
+ objectStore.shutdown();
+ }
+
+ @Override
+ public boolean openTransaction() {
+ return objectStore.openTransaction();
+ }
+
+ @Override
+ public void rollbackTransaction() {
+ objectStore.rollbackTransaction();
+ }
+
+ @Override
+ public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+ if (shouldEventSucceed) {
+ objectStore.createDatabase(db);
+ } else {
+ throw new RuntimeException("Failed event");
+ }
+ }
+
+ @Override
+ public Database getDatabase(String dbName) throws NoSuchObjectException {
+ return objectStore.getDatabase(dbName);
+ }
+
+ @Override
+ public boolean dropDatabase(String dbName)
+ throws NoSuchObjectException, MetaException {
+ if (shouldEventSucceed) {
+ return objectStore.dropDatabase(dbName);
+ } else {
+ throw new RuntimeException("Event failed.");
+ }
+ }
+
+ @Override
+ public boolean alterDatabase(String dbName, Database db)
+ throws NoSuchObjectException, MetaException {
+ return objectStore.alterDatabase(dbName, db);
+ }
+
+ @Override
+ public List<String> getDatabases(String pattern) throws MetaException {
+ return objectStore.getDatabases(pattern);
+ }
+
+ @Override
+ public List<String> getAllDatabases() throws MetaException {
+ return objectStore.getAllDatabases();
+ }
+
+ @Override
+ public boolean createType(Type type) {
+ return objectStore.createType(type);
+ }
+
+ @Override
+ public Type getType(String typeName) {
+ return objectStore.getType(typeName);
+ }
+
+ @Override
+ public boolean dropType(String typeName) {
+ return objectStore.dropType(typeName);
+ }
+
+ @Override
+ public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+ if (shouldEventSucceed) {
+ objectStore.createTable(tbl);
+ } else {
+ throw new RuntimeException("Event failed.");
+ }
+ }
+
+ @Override
+ public boolean dropTable(String dbName, String tableName)
+ throws MetaException, NoSuchObjectException,
+ InvalidObjectException, InvalidInputException {
+ if (shouldEventSucceed) {
+ return objectStore.dropTable(dbName, tableName);
+ } else {
+ throw new RuntimeException("Event failed.");
+ }
+ }
+
+ @Override
+ public Table getTable(String dbName, String tableName) throws MetaException {
+ return objectStore.getTable(dbName, tableName);
+ }
+
+ @Override
+ public boolean addPartition(Partition part)
+ throws InvalidObjectException, MetaException {
+ return objectStore.addPartition(part);
+ }
+
+ @Override
+ public Partition getPartition(String dbName, String tableName, List<String> partVals)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartition(dbName, tableName, partVals);
+ }
+
+ @Override
+ public boolean dropPartition(String dbName, String tableName, List<String> partVals)
+ throws MetaException, NoSuchObjectException,
+ InvalidObjectException, InvalidInputException {
+ if (shouldEventSucceed) {
+ return objectStore.dropPartition(dbName, tableName, partVals);
+ } else {
+ throw new RuntimeException("Event failed.");
+ }
+ }
+
+ @Override
+ public List<Partition> getPartitions(String dbName, String tableName, int max)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitions(dbName, tableName, max);
+ }
+
+ @Override
+ public void alterTable(String dbName, String name, Table newTable)
+ throws InvalidObjectException, MetaException {
+ if (shouldEventSucceed) {
+ objectStore.alterTable(dbName, name, newTable);
+ } else {
+ throw new RuntimeException("Event failed.");
+ }
+ }
+
+ @Override
+ public List<String> getTables(String dbName, String pattern) throws MetaException {
+ return objectStore.getTables(dbName, pattern);
+ }
+
+ @Override
+ public List<String> getTables(String dbName, String pattern, TableType tableType) throws MetaException {
+ return objectStore.getTables(dbName, pattern, tableType);
+ }
+
+ @Override
+ public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+ throws MetaException {
+ return objectStore.getTableMeta(dbNames, tableNames, tableTypes);
+ }
+
+ @Override
+ public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+ throws MetaException, UnknownDBException {
+ return objectStore.getTableObjectsByName(dbName, tableNames);
+ }
+
+ @Override
+ public List<String> getAllTables(String dbName) throws MetaException {
+ return objectStore.getAllTables(dbName);
+ }
+
+ @Override
+ public List<String> listTableNamesByFilter(String dbName, String filter,
+ short maxTables) throws MetaException, UnknownDBException {
+ return objectStore.listTableNamesByFilter(dbName, filter, maxTables);
+ }
+
+ @Override
+ public List<String> listPartitionNames(String dbName, String tblName, short maxParts)
+ throws MetaException {
+ return objectStore.listPartitionNames(dbName, tblName, maxParts);
+ }
+
+ @Override
+ public List<String> listPartitionNamesByFilter(String dbName, String tblName,
+ String filter, short maxParts) throws MetaException {
+ return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, maxParts);
+ }
+
+ @Override
+ public void alterPartition(String dbName, String tblName, List<String> partVals,
+ Partition newPart) throws InvalidObjectException, MetaException {
+ if (shouldEventSucceed) {
+ objectStore.alterPartition(dbName, tblName, partVals, newPart);
+ } else {
+ throw new RuntimeException("Event failed.");
+ }
+ }
+
+ @Override
+ public void alterPartitions(String dbName, String tblName,
+ List<List<String>> partValsList, List<Partition> newParts)
+ throws InvalidObjectException, MetaException {
+ objectStore.alterPartitions(dbName, tblName, partValsList, newParts);
+ }
+
+ @Override
+ public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
+ return objectStore.addIndex(index);
+ }
+
+ @Override
+ public Index getIndex(String dbName, String origTableName, String indexName)
+ throws MetaException {
+ return objectStore.getIndex(dbName, origTableName, indexName);
+ }
+
+ @Override
+ public boolean dropIndex(String dbName, String origTableName, String indexName)
+ throws MetaException {
+ return objectStore.dropIndex(dbName, origTableName, indexName);
+ }
+
+ @Override
+ public List<Index> getIndexes(String dbName, String origTableName, int max)
+ throws MetaException {
+ return objectStore.getIndexes(dbName, origTableName, max);
+ }
+
+ @Override
+ public List<String> listIndexNames(String dbName, String origTableName, short max)
+ throws MetaException {
+ return objectStore.listIndexNames(dbName, origTableName, max);
+ }
+
+ @Override
+ public void alterIndex(String dbName, String baseTblName, String name, Index newIndex)
+ throws InvalidObjectException, MetaException {
+ objectStore.alterIndex(dbName, baseTblName, name, newIndex);
+ }
+
+ @Override
+ public List<Partition> getPartitionsByFilter(String dbName, String tblName,
+ String filter, short maxParts) throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionsByFilter(dbName, tblName, filter, maxParts);
+ }
+
+ @Override
+ public int getNumPartitionsByFilter(String dbName, String tblName,
+ String filter) throws MetaException, NoSuchObjectException {
+ return objectStore.getNumPartitionsByFilter(dbName, tblName, filter);
+ }
+
+ @Override
+ public int getNumPartitionsByExpr(String dbName, String tblName,
+ byte[] expr) throws MetaException, NoSuchObjectException {
+ return objectStore.getNumPartitionsByExpr(dbName, tblName, expr);
+ }
+
+ @Override
+ public List<Partition> getPartitionsByNames(String dbName, String tblName,
+ List<String> partNames) throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionsByNames(dbName, tblName, partNames);
+ }
+
+ @Override
+ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+ String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+ return objectStore.getPartitionsByExpr(
+ dbName, tblName, expr, defaultPartitionName, maxParts, result);
+ }
+
+ @Override
+ public Table markPartitionForEvent(String dbName, String tblName,
+ Map<String, String> partVals, PartitionEventType evtType)
+ throws MetaException, UnknownTableException, InvalidPartitionException,
+ UnknownPartitionException {
+ return objectStore.markPartitionForEvent(dbName, tblName, partVals, evtType);
+ }
+
+ @Override
+ public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+ Map<String, String> partName, PartitionEventType evtType)
+ throws MetaException, UnknownTableException, InvalidPartitionException,
+ UnknownPartitionException {
+ return objectStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType);
+ }
+
+ @Override
+ public boolean addRole(String rowName, String ownerName) throws InvalidObjectException,
+ MetaException, NoSuchObjectException {
+ return objectStore.addRole(rowName, ownerName);
+ }
+
+ @Override
+ public boolean removeRole(String roleName)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.removeRole(roleName);
+ }
+
+ @Override
+ public boolean grantRole(Role role, String userName, PrincipalType principalType,
+ String grantor, PrincipalType grantorType, boolean grantOption)
+ throws MetaException, NoSuchObjectException, InvalidObjectException {
+ return objectStore.grantRole(role, userName, principalType, grantor, grantorType,
+ grantOption);
+ }
+
+ @Override
+ public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.revokeRole(role, userName, principalType, grantOption);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+ List<String> groupNames) throws InvalidObjectException, MetaException {
+ return objectStore.getUserPrivilegeSet(userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+ List<String> groupNames) throws InvalidObjectException, MetaException {
+ return objectStore.getDBPrivilegeSet(dbName, userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
+ String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+ return objectStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
+ String partition, String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+ return objectStore.getPartitionPrivilegeSet(dbName, tableName, partition,
+ userName, groupNames);
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
+ String partitionName, String columnName, String userName,
+ List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+ return objectStore.getColumnPrivilegeSet(dbName, tableName, partitionName,
+ columnName, userName, groupNames);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+ PrincipalType principalType) {
+ return objectStore.listPrincipalGlobalGrants(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+ PrincipalType principalType, String dbName) {
+ return objectStore.listPrincipalDBGrants(principalName, principalType, dbName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName) {
+ return objectStore.listAllTableGrants(principalName, principalType,
+ dbName, tableName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName,
+ List<String> partValues,
+ String partName) {
+ return objectStore.listPrincipalPartitionGrants(principalName, principalType,
+ dbName, tableName, partValues, partName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+ PrincipalType principalType, String dbName,
+ String tableName, String columnName) {
+ return objectStore.listPrincipalTableColumnGrants(principalName, principalType,
+ dbName, tableName, columnName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+ String principalName, PrincipalType principalType, String dbName, String tableName,
+ List<String> partVals, String partName, String columnName) {
+ return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType,
+ dbName, tableName, partVals, partName, columnName);
+ }
+
+ @Override
+ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+ MetaException, NoSuchObjectException {
+ return objectStore.grantPrivileges(privileges);
+ }
+
+ @Override
+ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+ throws InvalidObjectException, MetaException, NoSuchObjectException {
+ return objectStore.revokePrivileges(privileges, grantOption);
+ }
+
+ @Override
+ public Role getRole(String roleName) throws NoSuchObjectException {
+ return objectStore.getRole(roleName);
+ }
+
+ @Override
+ public List<String> listRoleNames() {
+ return objectStore.listRoleNames();
+ }
+
+ @Override
+ public List<Role> listRoles(String principalName, PrincipalType principalType) {
+ return objectStore.listRoles(principalName, principalType);
+ }
+
+ @Override
+ public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+ PrincipalType principalType) {
+ return objectStore.listRolesWithGrants(principalName, principalType);
+ }
+
+ @Override
+ public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+ return objectStore.listRoleMembers(roleName);
+ }
+
+ @Override
+ public Partition getPartitionWithAuth(String dbName, String tblName,
+ List<String> partVals, String userName, List<String> groupNames)
+ throws MetaException, NoSuchObjectException, InvalidObjectException {
+ return objectStore.getPartitionWithAuth(dbName, tblName, partVals, userName,
+ groupNames);
+ }
+
+ @Override
+ public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
+ short maxParts, String userName, List<String> groupNames)
+ throws MetaException, NoSuchObjectException, InvalidObjectException {
+ return objectStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName,
+ groupNames);
+ }
+
+ @Override
+ public List<String> listPartitionNamesPs(String dbName, String tblName,
+ List<String> partVals, short maxParts)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
+ }
+
+ @Override
+ public List<Partition> listPartitionsPsWithAuth(String dbName, String tblName,
+ List<String> partVals, short maxParts, String userName,
+ List<String> groupNames)
+ throws MetaException, InvalidObjectException, NoSuchObjectException {
+ return objectStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts,
+ userName, groupNames);
+ }
+
+ @Override
+ public long cleanupEvents() {
+ return objectStore.cleanupEvents();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalDBGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalTableGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalPartitionGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalTableColumnGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return objectStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+ return objectStore.listGlobalGrantsAll();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
+ return objectStore.listDBGrantsAll(dbName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName,
+ String partitionName, String columnName) {
+ return objectStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
+ return objectStore.listTableGrantsAll(dbName, tableName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName,
+ String partitionName) {
+ return objectStore.listPartitionGrantsAll(dbName, tableName, partitionName);
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName,
+ String columnName) {
+ return objectStore.listTableColumnGrantsAll(dbName, tableName, columnName);
+ }
+
+ @Override
+ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
+ List<String> colNames) throws MetaException, NoSuchObjectException {
+ return objectStore.getTableColumnStatistics(dbName, tableName, colNames);
+ }
+
+ @Override
+ public boolean deleteTableColumnStatistics(String dbName, String tableName,
+ String colName)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.deleteTableColumnStatistics(dbName, tableName, colName);
+ }
+
+ @Override
+ public boolean deletePartitionColumnStatistics(String dbName, String tableName,
+ String partName, List<String> partVals, String colName)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.deletePartitionColumnStatistics(dbName, tableName, partName,
+ partVals, colName);
+ }
+
+ @Override
+ public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.updateTableColumnStatistics(statsObj);
+ }
+
+ @Override
+ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+ List<String> partVals)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return objectStore.updatePartitionColumnStatistics(statsObj, partVals);
+ }
+
+ @Override
+ public boolean addToken(String tokenIdentifier, String delegationToken) {
+ return false;
+ }
+
+ @Override
+ public boolean removeToken(String tokenIdentifier) {
+ return false;
+ }
+
+ @Override
+ public String getToken(String tokenIdentifier) {
+ return "";
+ }
+
+ @Override
+ public List<String> getAllTokenIdentifiers() {
+ return new ArrayList<String>();
+ }
+
+ @Override
+ public int addMasterKey(String key) throws MetaException {
+ return -1;
+ }
+
+ @Override
+ public void updateMasterKey(Integer seqNo, String key)
+ throws NoSuchObjectException, MetaException {}
+
+ @Override
+ public boolean removeMasterKey(Integer keySeq) {
+ return false;
+ }
+
+ @Override
+ public String[] getMasterKeys() {
+ return new String[0];
+ }
+
+ @Override
+ public void verifySchema() throws MetaException {
+ }
+
+ @Override
+ public String getMetaStoreSchemaVersion() throws MetaException {
+ return objectStore.getMetaStoreSchemaVersion();
+ }
+
+ @Override
+ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
+ objectStore.setMetaStoreSchemaVersion(schemaVersion, comment);
+
+ }
+
+ @Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
+ String tblName, List<String> colNames,
+ List<String> partNames)
+ throws MetaException, NoSuchObjectException {
+ return objectStore.getPartitionColumnStatistics(dbName, tblName , colNames, partNames);
+ }
+
+ @Override
+ public boolean doesPartitionExist(String dbName, String tableName,
+ List<String> partVals) throws MetaException, NoSuchObjectException {
+ return objectStore.doesPartitionExist(dbName, tableName, partVals);
+ }
+
+ @Override
+ public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+ throws InvalidObjectException, MetaException {
+ return objectStore.addPartitions(dbName, tblName, parts);
+ }
+
+ @Override
+ public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec,
+ boolean ifNotExists) throws InvalidObjectException, MetaException {
+ return false;
+ }
+
+ @Override
+ public void dropPartitions(String dbName, String tblName, List<String> partNames)
+ throws MetaException, NoSuchObjectException {
+ objectStore.dropPartitions(dbName, tblName, partNames);
+ }
+
+ @Override
+ public void createFunction(Function func) throws InvalidObjectException,
+ MetaException {
+ objectStore.createFunction(func);
+ }
+
+ @Override
+ public void alterFunction(String dbName, String funcName, Function newFunction)
+ throws InvalidObjectException, MetaException {
+ objectStore.alterFunction(dbName, funcName, newFunction);
+ }
+
+ @Override
+ public void dropFunction(String dbName, String funcName)
+ throws MetaException, NoSuchObjectException, InvalidObjectException,
+ InvalidInputException {
+ objectStore.dropFunction(dbName, funcName);
+ }
+
+ @Override
+ public Function getFunction(String dbName, String funcName)
+ throws MetaException {
+ return objectStore.getFunction(dbName, funcName);
+ }
+
+ @Override
+ public List<Function> getAllFunctions()
+ throws MetaException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getFunctions(String dbName, String pattern)
+ throws MetaException {
+ return objectStore.getFunctions(dbName, pattern);
+ }
+
+ @Override
+ public AggrStats get_aggr_stats_for(String dbName,
+ String tblName, List<String> partNames, List<String> colNames)
+ throws MetaException {
+ return null;
+ }
+
+ @Override
+ public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+ return objectStore.getNextNotification(rqst);
+ }
+
+ @Override
+ public void addNotificationEvent(NotificationEvent event) {
+ objectStore.addNotificationEvent(event);
+ }
+
+ @Override
+ public void cleanNotificationEvents(int olderThan) {
+ objectStore.cleanNotificationEvents(olderThan);
+ }
+
+ @Override
+ public CurrentNotificationEventId getCurrentNotificationEventId() {
+ return objectStore.getCurrentNotificationEventId();
+ }
+
+ @Override
+ public void flushCache() {
+ objectStore.flushCache();
+ }
+
+ @Override
+ public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+ return null;
+ }
+
+ @Override
+ public void putFileMetadata(
+ List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+ }
+
+ @Override
+ public boolean isFileMetadataSupported() {
+ return false;
+ }
+
+
+ @Override
+ public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+ ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+ }
+
+ @Override
+ public int getTableCount() throws MetaException {
+ return objectStore.getTableCount();
+ }
+
+ @Override
+ public int getPartitionCount() throws MetaException {
+ return objectStore.getPartitionCount();
+ }
+
+ @Override
+ public int getDatabaseCount() throws MetaException {
+ return objectStore.getDatabaseCount();
+ }
+
+ @Override
+ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+ return null;
+ }
+
+ @Override
+ public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+ throws MetaException {
+ return null;
+ }
+
+ @Override
+ public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+ String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+ throws MetaException {
+ return null;
+ }
+
+ @Override
+ public void createTableWithConstraints(Table tbl,
+ List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys)
+ throws InvalidObjectException, MetaException {
+ }
+
+ @Override
+ public void dropConstraint(String dbName, String tableName,
+ String constraintName) throws NoSuchObjectException {
+ }
+
+ @Override
+ public void addPrimaryKeys(List<SQLPrimaryKey> pks)
+ throws InvalidObjectException, MetaException {
+ }
+
+ @Override
+ public void addForeignKeys(List<SQLForeignKey> fks)
+ throws InvalidObjectException, MetaException {
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/3918a639/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
index 81ce67b..1cd32d5 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.EventRequestType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.FireEventRequest;
import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
@@ -70,12 +69,14 @@ public class TestDbNotificationListener {
@BeforeClass
public static void connectToMetastore() throws Exception {
HiveConf conf = new HiveConf();
- conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
+ conf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
DbNotificationListener.class.getName());
conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL, String.valueOf(EVENTS_TTL)+"s");
conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
conf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+ conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
+ DummyRawStoreFailEvent.class.getName());
Class dbNotificationListener =
Class.forName("org.apache.hive.hcatalog.listener.DbNotificationListener");
Class[] classes = dbNotificationListener.getDeclaredClasses();
@@ -101,6 +102,7 @@ public class TestDbNotificationListener {
if (now > Integer.MAX_VALUE) fail("Bummer, time has fallen over the edge");
else startTime = (int) now;
firstEventId = msClient.getCurrentNotificationEventId().getEventId();
+ DummyRawStoreFailEvent.setEventSucceed(true);
}
@Test
@@ -119,6 +121,17 @@ public class TestDbNotificationListener {
assertNull(event.getTableName());
assertTrue(event.getMessage().matches("\\{\"eventType\":\"CREATE_DATABASE\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"mydb\",\"timestamp\":[0-9]+}"));
+
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ db = new Database("mydb2", "no description", "file:/tmp", emptyParameters);
+ try {
+ msClient.createDatabase(db);
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(1, rsp.getEventsSize());
}
@Test
@@ -138,6 +151,18 @@ public class TestDbNotificationListener {
assertNull(event.getTableName());
assertTrue(event.getMessage().matches("\\{\"eventType\":\"DROP_DATABASE\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"dropdb\",\"timestamp\":[0-9]+}"));
+
+ db = new Database("dropdb", "no description", "file:/tmp", emptyParameters);
+ msClient.createDatabase(db);
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ try {
+ msClient.dropDatabase("dropdb");
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(3, rsp.getEventsSize());
}
@Test
@@ -162,6 +187,18 @@ public class TestDbNotificationListener {
assertEquals("mytable", event.getTableName());
assertTrue(event.getMessage().matches("\\{\"eventType\":\"CREATE_TABLE\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"default\",\"table\":\"mytable\",\"timestamp\":[0-9]+}"));
+
+ table = new Table("mytable2", "default", "me", startTime, startTime, 0, sd, null,
+ emptyParameters, null, null, null);
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ try {
+ msClient.createTable(table);
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(1, rsp.getEventsSize());
}
@Test
@@ -192,6 +229,16 @@ public class TestDbNotificationListener {
assertTrue(event.getMessage().matches("\\{\"eventType\":\"ALTER_TABLE\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"default\",\"table\":\"alttable\"," +
"\"timestamp\":[0-9]+}"));
+
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ try {
+ msClient.alter_table("default", "alttable", table);
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(2, rsp.getEventsSize());
}
@Test
@@ -218,6 +265,19 @@ public class TestDbNotificationListener {
assertTrue(event.getMessage().matches("\\{\"eventType\":\"DROP_TABLE\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"default\",\"table\":" +
"\"droptable\",\"timestamp\":[0-9]+}"));
+
+ table = new Table("droptable2", "default", "me", startTime, startTime, 0, sd, null,
+ emptyParameters, null, null, null);
+ msClient.createTable(table);
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ try {
+ msClient.dropTable("default", "droptable2");
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(3, rsp.getEventsSize());
}
@Test
@@ -249,6 +309,18 @@ public class TestDbNotificationListener {
assertTrue(event.getMessage().matches("\\{\"eventType\":\"ADD_PARTITION\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"default\",\"table\":" +
"\"addparttable\",\"timestamp\":[0-9]+,\"partitions\":\\[\\{\"ds\":\"today\"}]}"));
+
+ partition = new Partition(Arrays.asList("tomorrow"), "default", "tableDoesNotExist",
+ startTime, startTime, sd, emptyParameters);
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ try {
+ msClient.add_partition(partition);
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(2, rsp.getEventsSize());
}
@Test
@@ -274,7 +346,6 @@ public class TestDbNotificationListener {
NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null);
assertEquals(3, rsp.getEventsSize());
-
NotificationEvent event = rsp.getEvents().get(2);
assertEquals(firstEventId + 3, event.getEventId());
assertTrue(event.getEventTime() >= startTime);
@@ -285,6 +356,16 @@ public class TestDbNotificationListener {
event.getMessage().matches("\\{\"eventType\":\"ALTER_PARTITION\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"default\",\"table\":\"alterparttable\"," +
"\"timestamp\":[0-9]+,\"keyValues\":\\{\"ds\":\"today\"}}"));
+
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ try {
+ msClient.alter_partition("default", "alterparttable", newPart, null);
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(3, rsp.getEventsSize());
}
@Test
@@ -318,6 +399,19 @@ public class TestDbNotificationListener {
assertTrue(event.getMessage().matches("\\{\"eventType\":\"DROP_PARTITION\",\"server\":\"\"," +
"\"servicePrincipal\":\"\",\"db\":\"default\",\"table\":" +
"\"dropparttable\",\"timestamp\":[0-9]+,\"partitions\":\\[\\{\"ds\":\"today\"}]}"));
+
+ partition = new Partition(Arrays.asList("tomorrow"), "default", "dropPartTable",
+ startTime, startTime, sd, emptyParameters);
+ msClient.add_partition(partition);
+ DummyRawStoreFailEvent.setEventSucceed(false);
+ try {
+ msClient.dropPartition("default", "dropparttable", Arrays.asList("tomorrow"), false);
+ } catch (Exception ex) {
+ // expected
+ }
+
+ rsp = msClient.getNextNotification(firstEventId, 0, null);
+ assertEquals(4, rsp.getEventsSize());
}
@Test
http://git-wip-us.apache.org/repos/asf/hive/blob/3918a639/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index af16f75..fd4527e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hive.metastore;
+import com.google.common.collect.Lists;
+
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -25,8 +27,6 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-
-import com.google.common.collect.Lists;
import junit.framework.TestCase;
import org.apache.hadoop.hive.cli.CliSessionState;
@@ -455,5 +455,4 @@ public class TestMetaStoreEventListener extends TestCase {
assertEquals("true", event.getOldValue());
assertEquals("false", event.getNewValue());
}
-
}
http://git-wip-us.apache.org/repos/asf/hive/blob/3918a639/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index dedd449..a3d322f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
/**
* Interface for Alter Table and Alter Partition code
@@ -34,6 +35,33 @@ import org.apache.hadoop.hive.metastore.api.Table;
public interface AlterHandler extends Configurable {
/**
+ * @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, Warehouse, String,
+ * String, Table, EnvironmentContext, HMSHandler)}
+ *
+ * handles alter table, the changes could be cascaded to partitions if applicable
+ *
+ * @param msdb
+ * object to get metadata
+ * @param wh
+ * Hive Warehouse where table data is stored
+ * @param dbname
+ * database of the table being altered
+ * @param name
+ * original name of the table being altered. same as
+ * <i>newTable.tableName</i> if alter op is not a rename.
+ * @param newTable
+ * new table object
+ * @throws InvalidOperationException
+ * thrown if the newTable object is invalid
+ * @throws MetaException
+ * thrown if there is any other error
+ */
+ @Deprecated
+ void alterTable(RawStore msdb, Warehouse wh, String dbname,
+ String name, Table newTable, EnvironmentContext envContext)
+ throws InvalidOperationException, MetaException;
+
+ /**
* handles alter table, the changes could be cascaded to partitions if applicable
*
* @param msdb
@@ -47,18 +75,21 @@ public interface AlterHandler extends Configurable {
* <i>newTable.tableName</i> if alter op is not a rename.
* @param newTable
* new table object
- * @param cascade
- * if the changes will be cascaded to its partitions if applicable
+ * @param handler
+ * HMSHandle object (required to log event notification)
* @throws InvalidOperationException
* thrown if the newTable object is invalid
* @throws MetaException
* thrown if there is any other error
*/
- public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname,
- String name, Table newTable, EnvironmentContext envContext) throws InvalidOperationException,
- MetaException;
+ void alterTable(RawStore msdb, Warehouse wh, String dbname,
+ String name, Table newTable, EnvironmentContext envContext,
+ HMSHandler handler) throws InvalidOperationException, MetaException;
/**
+ * @deprecated As of release 2.2.0. Replaced by {@link #alterPartition(RawStore, Warehouse, String,
+ * String, List, Partition, EnvironmentContext, HMSHandler)}
+ *
* handles alter partition
*
* @param msdb
@@ -78,10 +109,65 @@ public interface AlterHandler extends Configurable {
* @throws AlreadyExistsException
* @throws MetaException
*/
- public abstract Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
- final String name, final List<String> part_vals, final Partition new_part, EnvironmentContext environmentContext)
- throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
- MetaException;
+ @Deprecated
+ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+ final String name, final List<String> part_vals, final Partition new_part,
+ EnvironmentContext environmentContext)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+
+ /**
+ * handles alter partition
+ *
+ * @param msdb
+ * object to get metadata
+ * @param wh
+ * @param dbname
+ * database of the partition being altered
+ * @param name
+ * table of the partition being altered
+ * @param part_vals
+ * original values of the partition being altered
+ * @param new_part
+ * new partition object
+ * @param handler
+ * HMSHandle object (required to log event notification)
+ * @return the altered partition
+ * @throws InvalidOperationException
+ * @throws InvalidObjectException
+ * @throws AlreadyExistsException
+ * @throws MetaException
+ */
+ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+ final String name, final List<String> part_vals, final Partition new_part, EnvironmentContext environmentContext,
+ HMSHandler handler)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+
+ /**
+ * @deprecated As of release 2.2.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String,
+ * String, List, EnvironmentContext, HMSHandler)}
+ *
+ * handles alter partitions
+ *
+ * @param msdb
+ * object to get metadata
+ * @param wh
+ * @param dbname
+ * database of the partition being altered
+ * @param name
+ * table of the partition being altered
+ * @param new_parts
+ * new partition list
+ * @return the altered partition list
+ * @throws InvalidOperationException
+ * @throws InvalidObjectException
+ * @throws AlreadyExistsException
+ * @throws MetaException
+ */
+ @Deprecated
+ List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+ final String dbname, final String name, final List<Partition> new_parts,
+ EnvironmentContext environmentContext)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
/**
* handles alter partitions
@@ -95,14 +181,16 @@ public interface AlterHandler extends Configurable {
* table of the partition being altered
* @param new_parts
* new partition list
+ * @param handler
+ * HMSHandle object (required to log event notification)
* @return the altered partition list
* @throws InvalidOperationException
* @throws InvalidObjectException
* @throws AlreadyExistsException
* @throws MetaException
*/
- public abstract List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
- final String dbname, final String name, final List<Partition> new_part, EnvironmentContext environmentContext)
- throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
- MetaException;
-}
+ List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+ final String dbname, final String name, final List<Partition> new_parts,
+ EnvironmentContext environmentContext,HMSHandler handler)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/3918a639/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 40b337a..be7ed32 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hive.metastore;
import com.google.common.collect.Lists;
import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hive.common.util.HiveStringUtils;
@@ -75,7 +78,15 @@ public class HiveAlterHandler implements AlterHandler {
@Override
public void alterTable(RawStore msdb, Warehouse wh, String dbname,
- String name, Table newt, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException {
+ String name, Table newt, EnvironmentContext environmentContext)
+ throws InvalidOperationException, MetaException {
+ alterTable(msdb, wh, dbname, name, newt, environmentContext, null);
+ }
+
+ @Override
+ public void alterTable(RawStore msdb, Warehouse wh, String dbname,
+ String name, Table newt, EnvironmentContext environmentContext,
+ HMSHandler handler) throws InvalidOperationException, MetaException {
final boolean cascade = environmentContext != null
&& environmentContext.isSetProperties()
&& StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
@@ -103,6 +114,10 @@ public class HiveAlterHandler implements AlterHandler {
boolean rename = false;
Table oldt = null;
List<ObjectPair<Partition, String>> altps = new ArrayList<ObjectPair<Partition, String>>();
+ List<MetaStoreEventListener> transactionalListeners = null;
+ if (handler != null) {
+ transactionalListeners = handler.getTransactionalListeners();
+ }
try {
msdb.openTransaction();
@@ -252,6 +267,13 @@ public class HiveAlterHandler implements AlterHandler {
}
alterTableUpdateTableColumnStats(msdb, oldt, newt);
+ if (transactionalListeners != null && transactionalListeners.size() > 0) {
+ AlterTableEvent alterTableEvent = new AlterTableEvent(oldt, newt, true, handler);
+ alterTableEvent.setEnvironmentContext(environmentContext);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAlterTable(alterTableEvent);
+ }
+ }
// commit the changes
success = msdb.commitTransaction();
} catch (InvalidObjectException e) {
@@ -268,6 +290,7 @@ public class HiveAlterHandler implements AlterHandler {
if (!success) {
msdb.rollbackTransaction();
}
+
if (success && moveData) {
// change the file name in hdfs
// check that src exists otherwise there is no need to copy the data
@@ -329,20 +352,32 @@ public class HiveAlterHandler implements AlterHandler {
}
return ex.getMessage();
}
+
+ @Override
+ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+ final String name, final List<String> part_vals, final Partition new_part,
+ EnvironmentContext environmentContext)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+ return alterPartition(msdb, wh, dbname, name, part_vals, new_part, environmentContext, null);
+ }
+
@Override
public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
- final String name, final List<String> part_vals, final Partition new_part, EnvironmentContext environmentContext)
- throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
- MetaException {
+ final String name, final List<String> part_vals, final Partition new_part,
+ EnvironmentContext environmentContext, HMSHandler handler)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
boolean success = false;
-
Path srcPath = null;
Path destPath = null;
FileSystem srcFs = null;
- FileSystem destFs = null;
+ FileSystem destFs;
Partition oldPart = null;
String oldPartLoc = null;
String newPartLoc = null;
+ List<MetaStoreEventListener> transactionalListeners = null;
+ if (handler != null) {
+ transactionalListeners = handler.getTransactionalListeners();
+ }
// Set DDL time to now if not specified
if (new_part.getParameters() == null ||
@@ -353,23 +388,44 @@ public class HiveAlterHandler implements AlterHandler {
}
Table tbl = msdb.getTable(dbname, name);
+ if (tbl == null) {
+ throw new InvalidObjectException(
+ "Unable to alter partition because table or database does not exist.");
+ }
+
//alter partition
if (part_vals == null || part_vals.size() == 0) {
try {
+ msdb.openTransaction();
oldPart = msdb.getPartition(dbname, name, new_part.getValues());
if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
}
+
updatePartColumnStats(msdb, dbname, name, new_part.getValues(), new_part);
msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
+ if (transactionalListeners != null && transactionalListeners.size() > 0) {
+ AlterPartitionEvent alterPartitionEvent =
+ new AlterPartitionEvent(oldPart, new_part, tbl, true, handler);
+ alterPartitionEvent.setEnvironmentContext(environmentContext);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAlterPartition(alterPartitionEvent);
+ }
+ }
+ success = msdb.commitTransaction();
} catch (InvalidObjectException e) {
throw new InvalidOperationException("alter is not possible");
} catch (NoSuchObjectException e){
//old partition does not exist
throw new InvalidOperationException("alter is not possible");
+ } finally {
+ if(!success) {
+ msdb.rollbackTransaction();
+ }
}
return oldPart;
}
+
//rename partition
try {
msdb.openTransaction();
@@ -380,21 +436,19 @@ public class HiveAlterHandler implements AlterHandler {
throw new InvalidObjectException(
"Unable to rename partition because old partition does not exist");
}
- Partition check_part = null;
+
+ Partition check_part;
try {
check_part = msdb.getPartition(dbname, name, new_part.getValues());
} catch(NoSuchObjectException e) {
// this means there is no existing partition
check_part = null;
}
+
if (check_part != null) {
throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." +
new_part.getValues());
}
- if (tbl == null) {
- throw new InvalidObjectException(
- "Unable to rename partition because table or database do not exist");
- }
// if the external partition is renamed, the file should not change
if (tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
@@ -420,24 +474,24 @@ public class HiveAlterHandler implements AlterHandler {
"Unable to change partition or table. Database " + dbname + " does not exist"
+ " Check metastore logs for detailed stack." + e.getMessage());
}
+
if (destPath != null) {
newPartLoc = destPath.toString();
oldPartLoc = oldPart.getSd().getLocation();
-
- srcPath = new Path(oldPartLoc);
-
LOG.info("srcPath:" + oldPartLoc);
LOG.info("descPath:" + newPartLoc);
+ srcPath = new Path(oldPartLoc);
srcFs = wh.getFs(srcPath);
destFs = wh.getFs(destPath);
// check that src and dest are on the same file system
if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
- throw new InvalidOperationException("table new location " + destPath
+ throw new InvalidOperationException("New table location " + destPath
+ " is on a different file system than the old location "
- + srcPath + ". This operation is not supported");
+ + srcPath + ". This operation is not supported.");
}
+
try {
- srcFs.exists(srcPath); // check that src exists and also checks
+ srcFs.exists(srcPath);
if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
throw new InvalidOperationException("New location for this table "
+ tbl.getDbName() + "." + tbl.getTableName()
@@ -448,10 +502,12 @@ public class HiveAlterHandler implements AlterHandler {
+ destPath + " for partition " + tbl.getDbName() + "."
+ tbl.getTableName() + " " + new_part.getValues());
}
+
new_part.getSd().setLocation(newPartLoc);
if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
}
+
String oldPartName = Warehouse.makePartName(tbl.getPartitionKeys(), oldPart.getValues());
try {
//existing partition column stats is no longer valid, remove
@@ -461,15 +517,26 @@ public class HiveAlterHandler implements AlterHandler {
} catch (InvalidInputException iie) {
throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
}
+
msdb.alterPartition(dbname, name, part_vals, new_part);
}
}
+ if (transactionalListeners != null && transactionalListeners.size() > 0) {
+ AlterPartitionEvent alterPartitionEvent =
+ new AlterPartitionEvent(oldPart, new_part, tbl, true, handler);
+ alterPartitionEvent.setEnvironmentContext(environmentContext);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAlterPartition(alterPartitionEvent);
+ }
+ }
+
success = msdb.commitTransaction();
} finally {
if (!success) {
msdb.rollbackTransaction();
}
+
if (success && newPartLoc != null && newPartLoc.compareTo(oldPartLoc) != 0) {
//rename the data directory
try{
@@ -479,21 +546,35 @@ public class HiveAlterHandler implements AlterHandler {
if (!wh.mkdirs(destParentPath, true)) {
throw new IOException("Unable to create path " + destParentPath);
}
+
wh.renameDir(srcPath, destPath, true);
- LOG.info("rename done!");
+ LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
}
- } catch (IOException e) {
+ } catch (IOException ex) {
+ LOG.error("Cannot rename partition directory from " + srcPath + " to " +
+ destPath, ex);
boolean revertMetaDataTransaction = false;
try {
msdb.openTransaction();
msdb.alterPartition(dbname, name, new_part.getValues(), oldPart);
+ if (transactionalListeners != null && transactionalListeners.size() > 0) {
+ AlterPartitionEvent alterPartitionEvent =
+ new AlterPartitionEvent(new_part, oldPart, tbl, true, handler);
+ alterPartitionEvent.setEnvironmentContext(environmentContext);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAlterPartition(alterPartitionEvent);
+ }
+ }
+
revertMetaDataTransaction = msdb.commitTransaction();
- } catch (Exception e1) {
- LOG.error("Reverting metadata opeation failed During HDFS operation failed", e1);
+ } catch (Exception ex2) {
+ LOG.error("Attempt to revert partition metadata change failed. The revert was attempted " +
+ "because associated filesystem rename operation failed with exception " + ex.getMessage(), ex2);
if (!revertMetaDataTransaction) {
msdb.rollbackTransaction();
}
}
+
throw new InvalidOperationException("Unable to access old location "
+ srcPath + " for partition " + tbl.getDbName() + "."
+ tbl.getTableName() + " " + part_vals);
@@ -505,13 +586,33 @@ public class HiveAlterHandler implements AlterHandler {
@Override
public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
- final String name, final List<Partition> new_parts, EnvironmentContext environmentContext)
- throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
- MetaException {
+ final String name, final List<Partition> new_parts,
+ EnvironmentContext environmentContext)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+ return alterPartitions(msdb, wh, dbname, name, new_parts, environmentContext, null);
+ }
+
+ @Override
+ public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+ final String name, final List<Partition> new_parts, EnvironmentContext environmentContext,
+ HMSHandler handler)
+ throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
List<Partition> oldParts = new ArrayList<Partition>();
List<List<String>> partValsList = new ArrayList<List<String>>();
+ List<MetaStoreEventListener> transactionalListeners = null;
+ if (handler != null) {
+ transactionalListeners = handler.getTransactionalListeners();
+ }
+
Table tbl = msdb.getTable(dbname, name);
+ if (tbl == null) {
+ throw new InvalidObjectException(
+ "Unable to alter partitions because table or database does not exist.");
+ }
+
+ boolean success = false;
try {
+ msdb.openTransaction();
for (Partition tmpPart: new_parts) {
// Set DDL time to now if not specified
if (tmpPart.getParameters() == null ||
@@ -530,10 +631,36 @@ public class HiveAlterHandler implements AlterHandler {
}
updatePartColumnStats(msdb, dbname, name, oldTmpPart.getValues(), tmpPart);
}
+
msdb.alterPartitions(dbname, name, partValsList, new_parts);
+ Iterator<Partition> oldPartsIt = oldParts.iterator();
+ for (Partition newPart : new_parts) {
+ Partition oldPart;
+ if (oldPartsIt.hasNext()) {
+ oldPart = oldPartsIt.next();
+ } else {
+ throw new InvalidOperationException("Missing old partition corresponding to new partition " +
+ "when invoking MetaStoreEventListener for alterPartitions event.");
+ }
+
+ if (transactionalListeners != null && transactionalListeners.size() > 0) {
+ AlterPartitionEvent alterPartitionEvent =
+ new AlterPartitionEvent(oldPart, newPart, tbl, true, handler);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAlterPartition(alterPartitionEvent);
+ }
+ }
+ }
+
+ success = msdb.commitTransaction();
} catch (InvalidObjectException | NoSuchObjectException e) {
- throw new InvalidOperationException("Alter partition operation fails: " + e);
+ throw new InvalidOperationException("Alter partition operation failed: " + e);
+ } finally {
+ if(!success) {
+ msdb.rollbackTransaction();
+ }
}
+
return oldParts;
}