Posted to commits@hive.apache.org by ga...@apache.org on 2017/11/21 20:07:35 UTC
[1/7] hive git commit: HIVE-17967 Move HiveMetaStore class. This closes #270 (Alan Gates, reviewed by Thejas Nair).
Repository: hive
Updated Branches:
refs/heads/master 89b6566b0 -> 8fcc7f324
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
new file mode 100644
index 0000000..b1cd7db
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
+
+/**
+ *
+ * DummyJdoConnectionUrlHook.
+ *
+ * An implementation of JDOConnectionURLHook which simply returns CORRECT_URL when
+ * getJdoConnectionUrl is called.
+ */
+public class DummyJdoConnectionUrlHook implements JDOConnectionURLHook {
+
+ public static final String initialUrl = "BAD_URL";
+ public static final String newUrl = "CORRECT_URL";
+
+ @Override
+ public String getJdoConnectionUrl(Configuration conf) throws Exception {
+ return newUrl;
+ }
+
+ @Override
+ public void notifyBadConnectionUrl(String url) {
+ }
+
+}
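
The JDOConnectionURLHook contract implemented above has exactly the two methods shown: getJdoConnectionUrl(Configuration) and notifyBadConnectionUrl(String). As a minimal sketch of a more realistic hook, the hypothetical class below fails over from a primary to a backup JDBC URL once the metastore reports the primary as bad; the class name and URLs are illustrative placeholders, not part of this commit.

package org.apache.hadoop.hive.metastore;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;

public class FailoverJdoConnectionUrlHook implements JDOConnectionURLHook {

  private static final String PRIMARY_URL = "jdbc:mysql://db-primary:3306/metastore";
  private static final String BACKUP_URL = "jdbc:mysql://db-backup:3306/metastore";

  private volatile String currentUrl = PRIMARY_URL;

  @Override
  public String getJdoConnectionUrl(Configuration conf) throws Exception {
    // Called whenever the metastore (re)resolves its JDO connection URL.
    return currentUrl;
  }

  @Override
  public void notifyBadConnectionUrl(String url) {
    // Called after a connection attempt with the given URL has failed.
    if (PRIMARY_URL.equals(url)) {
      currentUrl = BACKUP_URL;
    }
  }
}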
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
new file mode 100644
index 0000000..da88bf5
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -0,0 +1,1010 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+
+/**
+ *
+ * DummyRawStoreForJdoConnection.
+ *
+ * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been
+ * applied when this class's setConf method is called, by checking that the value of the
+ * CONNECTURLKEY ConfVar has been updated.
+ *
+ * All non-void methods return default values.
+ */
+public class DummyRawStoreForJdoConnection implements RawStore {
+
+ @Override
+ public Configuration getConf() {
+
+ return null;
+ }
+
+ @Override
+ public void setConf(Configuration arg0) {
+ String expected = DummyJdoConnectionUrlHook.newUrl;
+ String actual = MetastoreConf.getVar(arg0, MetastoreConf.ConfVars.CONNECTURLKEY);
+
+ Assert.assertEquals("The expected URL used by JDO to connect to the metastore: " + expected +
+ " did not match the actual value when the Raw Store was initialized: " + actual,
+ expected, actual);
+ }
+
+ @Override
+ public void shutdown() {
+
+
+ }
+
+ @Override
+ public boolean openTransaction() {
+
+ return false;
+ }
+
+ @Override
+ public boolean commitTransaction() {
+ return false;
+ }
+
+ @Override
+ public boolean isActiveTransaction() {
+ return false;
+ }
+
+ @Override
+ public void rollbackTransaction() {
+ }
+
+ @Override
+ public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+
+
+ }
+
+ @Override
+ public Database getDatabase(String name) throws NoSuchObjectException {
+
+ return null;
+ }
+
+ @Override
+ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
+
+ return false;
+ }
+
+ @Override
+ public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException,
+ MetaException {
+
+ return false;
+ }
+
+ @Override
+ public List<String> getDatabases(String pattern) throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getAllDatabases() throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public boolean createType(Type type) {
+
+ return false;
+ }
+
+ @Override
+ public Type getType(String typeName) {
+
+ return null;
+ }
+
+ @Override
+ public boolean dropType(String typeName) {
+
+ return false;
+ }
+
+ @Override
+ public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+
+
+ }
+
+ @Override
+ public boolean dropTable(String dbName, String tableName) throws MetaException {
+
+ return false;
+ }
+
+ @Override
+ public Table getTable(String dbName, String tableName) throws MetaException {
+
+ return null;
+ }
+
+ @Override
+ public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+
+ return false;
+ }
+
+ @Override
+ public Partition getPartition(String dbName, String tableName, List<String> part_vals)
+ throws MetaException, NoSuchObjectException {
+
+ return null;
+ }
+
+ @Override
+ public boolean dropPartition(String dbName, String tableName, List<String> part_vals)
+ throws MetaException {
+
+ return false;
+ }
+
+ @Override
+ public List<Partition> getPartitions(String dbName, String tableName, int max)
+ throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException,
+ MetaException {
+
+
+ }
+
+ @Override
+ public List<String> getTables(String dbName, String pattern) throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getTables(String dbName, String pattern, TableType tableType) throws MetaException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+ throws MetaException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<Table> getTableObjectsByName(String dbname, List<String> tableNames)
+ throws MetaException, UnknownDBException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getAllTables(String dbName) throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> listTableNamesByFilter(String dbName, String filter, short max_tables)
+ throws MetaException, UnknownDBException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts)
+ throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+ return null;
+ }
+
+ @Override
+ public List<String> listPartitionNamesByFilter(String db_name, String tbl_name, String filter,
+ short max_parts) throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public void alterPartition(String db_name, String tbl_name, List<String> part_vals,
+ Partition new_part) throws InvalidObjectException, MetaException {
+
+
+ }
+
+ @Override
+ public void alterPartitions(String db_name, String tbl_name, List<List<String>> part_vals_list,
+ List<Partition> new_parts) throws InvalidObjectException, MetaException {
+
+
+ }
+
+
+ @Override
+ public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
+
+ return false;
+ }
+
+ @Override
+ public Index getIndex(String dbName, String origTableName, String indexName)
+ throws MetaException {
+
+ return null;
+ }
+
+ @Override
+ public boolean dropIndex(String dbName, String origTableName, String indexName)
+ throws MetaException {
+
+ return false;
+ }
+
+ @Override
+ public List<Index> getIndexes(String dbName, String origTableName, int max)
+ throws MetaException {
+
+ return null;
+ }
+
+ @Override
+ public List<String> listIndexNames(String dbName, String origTableName, short max)
+ throws MetaException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
+ throws InvalidObjectException, MetaException {
+
+
+ }
+
+ @Override
+ public List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter,
+ short maxParts) throws MetaException, NoSuchObjectException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<Partition> getPartitionsByNames(String dbName, String tblName,
+ List<String> partNames) throws MetaException, NoSuchObjectException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+ String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+ return false;
+ }
+
+ @Override
+ public int getNumPartitionsByFilter(String dbName, String tblName, String filter)
+ throws MetaException, NoSuchObjectException {
+ return -1;
+ }
+
+ @Override
+ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr)
+ throws MetaException, NoSuchObjectException {
+ return -1;
+ }
+
+ @Override
+ public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals,
+ PartitionEventType evtType) throws MetaException, UnknownTableException,
+ InvalidPartitionException, UnknownPartitionException {
+
+ return null;
+ }
+
+ @Override
+ public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+ Map<String, String> partName, PartitionEventType evtType) throws MetaException,
+ UnknownTableException, InvalidPartitionException, UnknownPartitionException {
+
+ return false;
+ }
+
+ @Override
+ public boolean addRole(String roleName, String ownerName) throws InvalidObjectException,
+ MetaException, NoSuchObjectException {
+
+ return false;
+ }
+
+ @Override
+ public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
+
+ return false;
+ }
+
+ @Override
+ public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
+ PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException,
+ InvalidObjectException {
+
+ return false;
+ }
+
+ @Override
+ public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+ throws MetaException, NoSuchObjectException {
+
+ return false;
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+
+ return null;
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+ List<String> groupNames) throws InvalidObjectException, MetaException {
+
+ return null;
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
+ String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
+
+ return null;
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
+ String partition, String userName, List<String> groupNames) throws InvalidObjectException,
+ MetaException {
+
+ return null;
+ }
+
+ @Override
+ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
+ String partitionName, String columnName, String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException {
+
+ return null;
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+ PrincipalType principalType) {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+ PrincipalType principalType, String dbName) {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName) {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, List<String> partValues,
+ String partName) {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, String columnName) {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, List<String> partVals,
+ String partName, String columnName) {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+ MetaException, NoSuchObjectException {
+
+ return false;
+ }
+
+ @Override
+ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+ throws InvalidObjectException, MetaException, NoSuchObjectException {
+
+ return false;
+ }
+
+ @Override
+ public Role getRole(String roleName) throws NoSuchObjectException {
+
+ return null;
+ }
+
+ @Override
+ public List<String> listRoleNames() {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<Role> listRoles(String principalName, PrincipalType principalType) {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+ PrincipalType principalType) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+ return null;
+ }
+
+ @Override
+ public Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals,
+ String user_name, List<String> group_names) throws MetaException, NoSuchObjectException,
+ InvalidObjectException {
+
+ return null;
+ }
+
+ @Override
+ public List<Partition> getPartitionsWithAuth(String dbName, String tblName, short maxParts,
+ String userName, List<String> groupNames) throws MetaException, NoSuchObjectException,
+ InvalidObjectException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> listPartitionNamesPs(String db_name, String tbl_name, List<String> part_vals,
+ short max_parts) throws MetaException, NoSuchObjectException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
+ List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+ throws MetaException, InvalidObjectException, NoSuchObjectException {
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public long cleanupEvents() {
+
+ return 0;
+ }
+
+ @Override
+ public boolean addToken(String tokenIdentifier, String delegationToken) {
+ return false;
+ }
+
+ @Override
+ public boolean removeToken(String tokenIdentifier) {
+ return false;
+ }
+
+ @Override
+ public String getToken(String tokenIdentifier) {
+ return null;
+ }
+
+ @Override
+ public List<String> getAllTokenIdentifiers() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public int addMasterKey(String key) {
+ return 0;
+ }
+
+ @Override
+ public void updateMasterKey(Integer seqNo, String key) {
+ }
+
+ @Override
+ public boolean removeMasterKey(Integer keySeq) {
+ return false;
+ }
+
+ @Override
+ public String[] getMasterKeys() {
+ return new String[0];
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+ String principalName, PrincipalType principalType) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName, String partitionName, String columnName) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName, String partitionName) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName, String columnName) {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
+ List<String> colName) throws MetaException, NoSuchObjectException {
+ return null;
+ }
+
+ @Override
+ public boolean deleteTableColumnStatistics(String dbName, String tableName,
+ String colName)
+ throws NoSuchObjectException, MetaException, InvalidObjectException {
+ return false;
+ }
+
+
+ @Override
+ public boolean deletePartitionColumnStatistics(String dbName, String tableName,
+ String partName, List<String> partVals, String colName)
+ throws NoSuchObjectException, MetaException, InvalidObjectException,
+ InvalidInputException {
+ return false;
+
+ }
+
+ @Override
+ public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+ throws NoSuchObjectException, MetaException, InvalidObjectException {
+ return false;
+ }
+
+ @Override
+ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals)
+ throws NoSuchObjectException, MetaException, InvalidObjectException {
+ return false;
+ }
+
+ @Override
+ public void verifySchema() throws MetaException {
+ }
+
+ @Override
+ public String getMetaStoreSchemaVersion() throws MetaException {
+ return null;
+ }
+
+ @Override
+ public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
+ }
+
+ @Override
+ public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
+ String tblName, List<String> colNames, List<String> partNames)
+ throws MetaException, NoSuchObjectException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public boolean doesPartitionExist(String dbName, String tableName,
+ List<String> partVals) throws MetaException, NoSuchObjectException {
+ return false;
+ }
+
+ @Override
+ public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+ throws InvalidObjectException, MetaException {
+ return false;
+ }
+
+ @Override
+ public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+ return false;
+ }
+
+ @Override
+ public void dropPartitions(String dbName, String tblName, List<String> partNames) {
+ }
+
+ @Override
+ public void createFunction(Function func) throws InvalidObjectException,
+ MetaException {
+ }
+
+ @Override
+ public void alterFunction(String dbName, String funcName, Function newFunction)
+ throws InvalidObjectException, MetaException {
+ }
+
+ @Override
+ public void dropFunction(String dbName, String funcName)
+ throws MetaException, NoSuchObjectException, InvalidObjectException,
+ InvalidInputException {
+ }
+
+ @Override
+ public Function getFunction(String dbName, String funcName)
+ throws MetaException {
+ return null;
+ }
+
+ @Override
+ public List<Function> getAllFunctions()
+ throws MetaException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getFunctions(String dbName, String pattern)
+ throws MetaException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public AggrStats get_aggr_stats_for(String dbName,
+ String tblName, List<String> partNames, List<String> colNames)
+ throws MetaException {
+ return null;
+ }
+
+ @Override
+ public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+ return null;
+ }
+
+ @Override
+ public void addNotificationEvent(NotificationEvent event) {
+
+ }
+
+ @Override
+ public void cleanNotificationEvents(int olderThan) {
+
+ }
+
+ @Override
+ public CurrentNotificationEventId getCurrentNotificationEventId() {
+ return null;
+ }
+
+ @Override
+ public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+ return null;
+ }
+
+ @Override
+ public void flushCache() {
+
+ }
+
+ @Override
+ public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+ return null;
+ }
+
+ @Override
+ public void putFileMetadata(
+ List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+ }
+
+ @Override
+ public boolean isFileMetadataSupported() {
+ return false;
+ }
+
+ @Override
+ public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+ ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+ }
+
+ @Override
+ public int getTableCount() throws MetaException {
+ return 0;
+ }
+
+ @Override
+ public int getPartitionCount() throws MetaException {
+ return 0;
+ }
+
+ @Override
+ public int getDatabaseCount() throws MetaException {
+ return 0;
+ }
+
+ @Override
+ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+ return null;
+ }
+
+ @Override
+ public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+ String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<SQLUniqueConstraint> getUniqueConstraints(String db_name, String tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name)
+ throws MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<String> createTableWithConstraints(Table tbl,
+ List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+ List<SQLUniqueConstraint> uniqueConstraints,
+ List<SQLNotNullConstraint> notNullConstraints)
+ throws InvalidObjectException, MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public void dropConstraint(String dbName, String tableName,
+ String constraintName) throws NoSuchObjectException {
+ // TODO Auto-generated method stub
+ }
+
+ @Override
+ public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+ throws InvalidObjectException, MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<String> addForeignKeys(List<SQLForeignKey> fks)
+ throws InvalidObjectException, MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+ throws InvalidObjectException, MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+ throws InvalidObjectException, MetaException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public Map<String, List<ColumnStatisticsObj>> getColStatsForTablePartitions(String dbName,
+ String tableName) throws MetaException, NoSuchObjectException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public String getMetastoreDbUuid() throws MetaException {
+ throw new MetaException("Get metastore uuid is not implemented");
+ }
+
+ @Override
+ public void createResourcePlan(
+ WMResourcePlan resourcePlan, int defaultPoolSize) throws MetaException {
+ }
+
+ @Override
+ public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+ return null;
+ }
+
+ @Override
+ public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+ return null;
+ }
+
+ @Override
+ public WMFullResourcePlan alterResourcePlan(
+ String name, WMResourcePlan resourcePlan, boolean canActivateDisabled)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ return null;
+ }
+
+ @Override
+ public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+ return null;
+ }
+
+ @Override
+ public boolean validateResourcePlan(String name)
+ throws NoSuchObjectException, InvalidObjectException, MetaException {
+ return false;
+ }
+
+ @Override
+ public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+ }
+
+ @Override
+ public void createWMTrigger(WMTrigger trigger) throws MetaException {
+ }
+
+ @Override
+ public void alterWMTrigger(WMTrigger trigger)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+ }
+
+ @Override
+ public void dropWMTrigger(String resourcePlanName, String triggerName)
+ throws NoSuchObjectException, MetaException {
+ }
+
+ @Override
+ public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+ throws NoSuchObjectException, MetaException {
+ return null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
new file mode 100644
index 0000000..f1a08dd
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.Test;
+
+public class TestHiveMetastoreCli {
+ private static final String[] CLI_ARGUMENTS = { "9999" };
+
+ @Test
+ public void testDefaultCliPortValue() {
+ Configuration configuration = MetastoreConf.newMetastoreConf();
+ HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+ assert (cli.getPort() == MetastoreConf.getIntVar(configuration, ConfVars.SERVER_PORT));
+ }
+
+ @Test
+ public void testOverriddenCliPortValue() {
+ Configuration configuration = MetastoreConf.newMetastoreConf();
+ HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+ cli.parse(TestHiveMetastoreCli.CLI_ARGUMENTS);
+
+ assert (cli.getPort() == 9999);
+ }
+
+ @Test
+ public void testOverriddenMetastoreServerPortValue() {
+ Configuration configuration = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setLongVar(configuration, ConfVars.SERVER_PORT, 12345);
+
+ HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+
+ assert (cli.getPort() == 12345);
+ }
+
+ @Test
+ public void testCliOverridesConfiguration() {
+ Configuration configuration = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setLongVar(configuration, ConfVars.SERVER_PORT, 12345);
+
+ HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+ cli.parse(CLI_ARGUMENTS);
+
+ assert (cli.getPort() == 9999);
+ }
+}
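
Taken together, the four tests above pin down the port precedence: a positional CLI argument beats a configured ConfVars.SERVER_PORT, which beats the built-in default. The snippet below condenses that ordering into one sequence, using only the constructor, parse() and getPort() calls exercised by the tests (the port values are arbitrary):

Configuration configuration = MetastoreConf.newMetastoreConf();
MetastoreConf.setLongVar(configuration, ConfVars.SERVER_PORT, 12345);

HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
// Before any arguments are parsed, the configured value wins over the default.
assert (cli.getPort() == 12345);

cli.parse(new String[] { "9999" });
// After parsing, the positional port argument wins over the configuration.
assert (cli.getPort() == 9999);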
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java
new file mode 100644
index 0000000..7c54354
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.junit.Test;
+
+/**
+ * TestMetaStoreConnectionUrlHook.
+ * Verifies that by the time a RawStore implementation is initialized, the connection
+ * URL has already been updated by any configured metastore connection URL hooks.
+ */
+public class TestMetaStoreConnectionUrlHook {
+
+ @Test
+ public void testUrlHook() throws Exception {
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setVar(conf, ConfVars.CONNECTURLHOOK, DummyJdoConnectionUrlHook.class.getName());
+ MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY, DummyJdoConnectionUrlHook.initialUrl);
+ MetastoreConf.setVar(conf, ConfVars.RAW_STORE_IMPL, DummyRawStoreForJdoConnection.class.getName());
+ MetaStoreUtils.setConfForStandloneMode(conf);
+
+ // Instantiating the HMSHandler causes it to check for the default database, which
+ // initializes an instance of DummyRawStoreForJdoConnection and runs the assertion in its setConf method
+ HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler(
+ "test_metastore_connection_url_hook_hms_handler", conf);
+ }
+}
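
For reference, the handshake that testUrlHook exercises can be written out step by step. This is a sketch, assuming (per the class comment and MetaStoreInit.java in this commit's file list) that the metastore performs steps 1 and 2 before creating the RawStore; it uses only classes from this commit and would need to run in a method declaring throws Exception:

Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY, DummyJdoConnectionUrlHook.initialUrl); // "BAD_URL"

// 1. The configured JDOConnectionURLHook is asked for the URL to use...
JDOConnectionURLHook hook = new DummyJdoConnectionUrlHook();
String updated = hook.getJdoConnectionUrl(conf); // returns "CORRECT_URL"

// 2. ...and its answer is written back into the configuration...
MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY, updated);

// 3. ...before the RawStore is initialized, so its setConf() sees the new URL.
new DummyRawStoreForJdoConnection().setConf(conf); // the internal assertion passes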
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 3c31118..24ea62e 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -23,6 +23,7 @@ import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
@@ -32,6 +33,9 @@ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.Role;
@@ -41,8 +45,11 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
import org.apache.hadoop.hive.metastore.metrics.Metrics;
import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.model.MNotificationLog;
+import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.junit.Assert;
import org.junit.Assume;
@@ -59,6 +66,12 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
public class TestObjectStore {
private ObjectStore objectStore = null;
@@ -458,5 +471,139 @@ public class TestObjectStore {
Assert.assertEquals(value, objectStore.getProp().getProperty(key));
Assert.assertNull(objectStore.getProp().getProperty(key1));
}
+
+ /**
+ * Test notification operations.
+ */
+ @Test
+ public void testNotificationOps() throws InterruptedException {
+ final int NO_EVENT_ID = 0;
+ final int FIRST_EVENT_ID = 1;
+ final int SECOND_EVENT_ID = 2;
+
+ NotificationEvent event =
+ new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
+ NotificationEventResponse eventResponse;
+ CurrentNotificationEventId eventId;
+
+ // Verify that there are no notifications available yet
+ eventId = objectStore.getCurrentNotificationEventId();
+ Assert.assertEquals(NO_EVENT_ID, eventId.getEventId());
+
+ // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
+ objectStore.addNotificationEvent(event);
+ Assert.assertEquals(FIRST_EVENT_ID, event.getEventId());
+ objectStore.addNotificationEvent(event);
+ Assert.assertEquals(SECOND_EVENT_ID, event.getEventId());
+
+ // Verify that objectStore fetches the latest notification event ID
+ eventId = objectStore.getCurrentNotificationEventId();
+ Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId());
+
+ // Verify that getNextNotification() returns all events
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+ Assert.assertEquals(2, eventResponse.getEventsSize());
+ Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+ Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
+
+ // Verify that getNextNotification(last) returns events after a specified event
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
+ Assert.assertEquals(1, eventResponse.getEventsSize());
+ Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+
+ // Verify that getNextNotification(last) returns zero events if there are no more notifications available
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
+ Assert.assertEquals(0, eventResponse.getEventsSize());
+
+ // Verify that cleanNotificationEvents() cleans up all old notifications
+ Thread.sleep(1);
+ objectStore.cleanNotificationEvents(1);
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+ Assert.assertEquals(0, eventResponse.getEventsSize());
+ }
+
+ @Ignore(
+ "This test is here to allow testing with other databases like mysql / postgres etc\n"
+ + " with user changes to the code. This cannot be run on apache derby because of\n"
+ + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
+ )
+ @Test
+ public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException {
+
+ final int NUM_THREADS = 10;
+ CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
+ () -> LoggerFactory.getLogger("test")
+ .debug(NUM_THREADS + " threads going to add notification"));
+
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
+ MockPartitionExpressionProxy.class.getName());
+ /*
+ Below are the properties that need to be set depending on which database this test is run against.
+ */
+
+// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
+// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
+// "jdbc:mysql://localhost:3306/metastore_db");
+// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
+// conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
+
+ /*
+ We have to add this entry manually: for tests the DB is initialized via metastore direct SQL,
+ and we don't run the schema creation SQL that includes an insert for NOTIFICATION_SEQUENCE,
+ which can be locked. The entry in NOTIFICATION_SEQUENCE otherwise happens via NOTIFICATION_EVENT insertion.
+ */
+ objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
+ objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
+
+ objectStore.addNotificationEvent(
+ new NotificationEvent(0, 0,
+ EventMessage.EventType.CREATE_DATABASE.toString(),
+ "CREATE DATABASE DB initial"));
+
+ ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
+ for (int i = 0; i < NUM_THREADS; i++) {
+ final int n = i;
+
+ executorService.execute(
+ () -> {
+ ObjectStore store = new ObjectStore();
+ store.setConf(conf);
+
+ String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
+ NotificationEvent dbEvent =
+ new NotificationEvent(0, 0, eventType,
+ "CREATE DATABASE DB" + n);
+ System.out.println("ADDING NOTIFICATION");
+
+ try {
+ cyclicBarrier.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ store.addNotificationEvent(dbEvent);
+ System.out.println("FINISH NOTIFICATION");
+ });
+ }
+ executorService.shutdown();
+ Assert.assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
+
+ // We have to set this up again because the underlying PMF keeps getting reinitialized,
+ // closing the original reference.
+ ObjectStore store = new ObjectStore();
+ store.setConf(conf);
+
+ NotificationEventResponse eventResponse = store.getNextNotification(
+ new NotificationEventRequest());
+ Assert.assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
+ long previousId = 0;
+ for (NotificationEvent event : eventResponse.getEvents()) {
+ Assert.assertTrue("previous:" + previousId + " current:" + event.getEventId(),
+ previousId < event.getEventId());
+ Assert.assertTrue(previousId + 1 == event.getEventId());
+ previousId = event.getEventId();
+ }
+ }
}
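
To run testConcurrentAddNotifications against MySQL as its @Ignore note suggests, the commented-out block inside the test needs concrete connection properties. A hypothetical wiring using the raw javax.jdo.option.* keys rather than ConfVars looks like this; the driver class, host, schema and credentials are placeholders to adapt:

Configuration conf = MetastoreConf.newMetastoreConf();
conf.set("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver");
conf.set("javax.jdo.option.ConnectionURL", "jdbc:mysql://localhost:3306/metastore_db");
conf.set("javax.jdo.option.ConnectionUserName", "hiveuser");
conf.set("javax.jdo.option.ConnectionPassword", "hivepassword");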
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
index 62335c6..e6e8fee 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java
@@ -146,7 +146,7 @@ public class TestMetastoreConf {
"test.double", "1.8",
"test.bool", "false",
"test.time", "30s",
- "test.str.list", "d,e",
+ "test.str.list", "d",
"test.class", TestClass2.class.getName()
));
conf = MetastoreConf.newMetastoreConf();
@@ -160,9 +160,8 @@ public class TestMetastoreConf {
Assert.assertEquals(30000,
MetastoreConf.getTimeVar(conf, ConfVars.TIME_TEST_ENTRY, TimeUnit.MILLISECONDS));
Collection<String> list = MetastoreConf.getStringCollection(conf, ConfVars.STR_LIST_ENTRY);
- Assert.assertEquals(2, list.size());
+ Assert.assertEquals(1, list.size());
Assert.assertTrue(list.contains("d"));
- Assert.assertTrue(list.contains("e"));
Assert.assertSame(TestClass2.class,
MetastoreConf.getClass(conf, ConfVars.CLASS_TEST_ENTRY, TestClass1.class, Runnable.class));
Assert.assertEquals("1.8", MetastoreConf.get(conf, ConfVars.DOUBLE_TEST_ENTRY.getVarname()));
@@ -182,7 +181,7 @@ public class TestMetastoreConf {
Assert.assertEquals(24, MetastoreConf.getLongVar(conf, ConfVars.LONG_TEST_ENTRY));
}
- @Ignore // Ignore for now as Hive's tests create a hive-site.xml in the test directory
+ @Test
public void readHiveSiteWithHiveConfDir() throws IOException {
createConfFile("hive-site.xml", false, "HIVE_CONF_DIR", instaMap(
"test.double", "1.8"
@@ -192,7 +191,7 @@ public class TestMetastoreConf {
0.01);
}
- @Ignore // Ignore for now as Hive's tests create a hive-site.xml in the test directory
+ @Test
public void readHiveSiteWithHiveHomeDir() throws IOException {
createConfFile("hive-site.xml", true, "HIVE_HOME", instaMap(
"test.bool", "false"
@@ -201,7 +200,7 @@ public class TestMetastoreConf {
Assert.assertFalse(MetastoreConf.getBoolVar(conf, ConfVars.BOOLEAN_TEST_ENTRY));
}
- @Ignore // Ignore for now as Hive's tests create a hive-metastoresite.xml in the test directory
+ @Test
public void readHiveMetastoreSiteWithHiveConfDir() throws IOException {
createConfFile("hivemetastore-site.xml", false, "HIVE_CONF_DIR", instaMap(
"test.double", "1.8"
@@ -211,7 +210,7 @@ public class TestMetastoreConf {
0.01);
}
- @Ignore // Ignore for now as Hive's tests create a hive-metastoresite.xml in the test directory
+ @Test
public void readHiveMetastoreSiteWithHiveHomeDir() throws IOException {
createConfFile("hivemetastore-site.xml", true, "HIVE_HOME", instaMap(
"test.bool", "false"
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java
index b5f37eb..f91b062 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java
@@ -19,8 +19,11 @@ package org.apache.hadoop.hive.metastore.utils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
@@ -30,8 +33,16 @@ import org.mockito.Mockito;
import javax.security.auth.login.LoginException;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Random;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
public class TestHdfsUtils {
private Random rand = new Random();
@@ -190,4 +201,145 @@ public class TestHdfsUtils {
}
}
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
+ * does not throw an exception when setting the group fails and recursion is disabled.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritGroup() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FileSystem fs = mock(FileSystem.class);
+
+ when(mockSourceStatus.getGroup()).thenReturn("fakeGroup1");
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(fs).setOwner(any(Path.class), any(String.class), any(String.class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "fakeGroup2", fs, new Path("fakePath"), false);
+ verify(fs).setOwner(any(Path.class), any(String.class), any(String.class));
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus}
+ * does not throw an exception when setting ACLs fails and recursion is disabled.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritAcls() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "true");
+
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ AclStatus mockAclStatus = mock(AclStatus.class);
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
+ when(mockAclStatus.toString()).thenReturn("");
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<>());
+ when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
+ doThrow(RuntimeException.class).when(mockFs).setAcl(any(Path.class), any(List.class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, null, mockFs, new Path("fakePath"), false);
+ verify(mockFs).setAcl(any(Path.class), any(List.class));
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus}
+ * does not throw an exception when setting permissions fails and recursion is disabled.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritPerms() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(mockFs).setPermission(any(Path.class), any(FsPermission.class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, null, mockFs, new Path("fakePath"),
+ false);
+ verify(mockFs).setPermission(any(Path.class), any(FsPermission.class));
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
+ * does not throw an exception when setting the group fails and recursion is enabled.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritGroupRecursive() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ String fakeSourceGroup = "fakeGroup1";
+ String fakeTargetGroup = "fakeGroup2";
+ Path fakeTarget = new Path("fakePath");
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FsShell mockFsShell = mock(FsShell.class);
+
+ when(mockSourceStatus.getGroup()).thenReturn(fakeSourceGroup);
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, fakeTargetGroup, mock(FileSystem.class), fakeTarget,
+ true, mockFsShell);
+ verify(mockFsShell).run(new String[]{"-chgrp", "-R", fakeSourceGroup, fakeTarget.toString()});
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus}
+ * does not throw an exception when setting ACLs fails and recursion is enabled.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritAclsRecursive() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "true");
+
+ Path fakeTarget = new Path("fakePath");
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FsShell mockFsShell = mock(FsShell.class);
+ AclStatus mockAclStatus = mock(AclStatus.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
+ when(mockAclStatus.toString()).thenReturn("");
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<>());
+ when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
+ doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell);
+ verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(String.class), fakeTarget.toString()});
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus}
+ * does not throw an exception when setting permissions fails and recursion is enabled.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ Path fakeTarget = new Path("fakePath");
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FsShell mockFsShell = mock(FsShell.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 0777));
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget,
+ true, mockFsShell);
+ verify(mockFsShell).run(new String[]{"-chmod", "-R", any(String.class), fakeTarget.toString()});
+ }
}
[7/7] hive git commit: HIVE-17967 Move HiveMetaStore class. This closes #270 (Alan Gates, reviewed by Thejas Nair).
Posted by ga...@apache.org.
HIVE-17967 Move HiveMetaStore class. This closes #270 (Alan Gates, reviewed by Thejas Nair).
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8fcc7f32
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8fcc7f32
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8fcc7f32
Branch: refs/heads/master
Commit: 8fcc7f324c88eb33987e5c2c9de61d719167bbea
Parents: 89b6566
Author: Alan Gates <ga...@hortonworks.com>
Authored: Tue Nov 21 12:06:19 2017 -0800
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Tue Nov 21 12:06:19 2017 -0800
----------------------------------------------------------------------
.../apache/hadoop/hive/common/FileUtils.java | 21 -
.../TestMetaStoreConnectionUrlHook.java | 62 -
.../TestPartitionExpressionProxyDefault.java | 40 +
.../ql/TestMetaStoreLimitPartitionRequest.java | 3 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 8165 ------------------
.../hadoop/hive/metastore/MetaStoreUtils.java | 861 +-
.../metastore/SerDeStorageSchemaReader.java | 58 +
.../hive/metastore/TSetIpAddressProcessor.java | 62 -
.../hive/metastore/TUGIBasedProcessor.java | 182 -
.../hive/metastore/repl/DumpDirCleanerTask.java | 30 +-
.../metastore/DummyJdoConnectionUrlHook.java | 45 -
.../DummyRawStoreForJdoConnection.java | 1011 ---
.../hive/metastore/MetaStoreTestUtils.java | 3 +
.../hive/metastore/TestHiveMetastoreCli.java | 63 -
.../hadoop/hive/metastore/TestObjectStore2.java | 229 -
.../apache/hadoop/hive/ql/TestTxnCommands.java | 5 +-
.../hive/ql/lockmgr/TestDbTxnManager2.java | 5 +-
.../add_partition_with_whitelist.q.out | 2 +-
.../alter_partition_with_whitelist.q.out | 2 +-
standalone-metastore/pom.xml | 2 +-
.../DefaultPartitionExpressionProxy.java | 57 +
.../metastore/DefaultStorageSchemaReader.java | 38 +
.../hadoop/hive/metastore/HiveMetaStore.java | 8037 +++++++++++++++++
.../hadoop/hive/metastore/MetaStoreInit.java | 2 +-
.../hive/metastore/MetastoreTaskThread.java | 38 +
.../hive/metastore/PartFilterExprUtil.java | 10 +-
.../hive/metastore/RunnableConfigurable.java | 26 -
.../hive/metastore/StorageSchemaReader.java | 46 +
.../hive/metastore/TSetIpAddressProcessor.java | 62 +
.../hive/metastore/TUGIBasedProcessor.java | 183 +
.../hive/metastore/conf/MetastoreConf.java | 45 +-
.../hive/metastore/events/EventCleanerTask.java | 34 +-
.../hive/metastore/events/ListenerEvent.java | 10 +
.../metastore/hooks/JDOConnectionURLHook.java | 5 +-
.../txn/AcidCompactionHistoryService.java | 13 +-
.../metastore/txn/AcidHouseKeeperService.java | 12 +-
.../txn/AcidOpenTxnsCounterService.java | 12 +-
.../hive/metastore/txn/AcidWriteSetService.java | 12 +-
.../hadoop/hive/metastore/txn/TxnHandler.java | 3 -
.../hive/metastore/utils/CommonCliOptions.java | 160 +
.../hadoop/hive/metastore/utils/FileUtils.java | 33 +
.../hadoop/hive/metastore/utils/HdfsUtils.java | 173 +
.../hadoop/hive/metastore/utils/LogUtils.java | 140 +
.../hive/metastore/utils/MetaStoreUtils.java | 345 +-
.../hive/metastore/utils/SecurityUtils.java | 75 +
.../metastore/DummyJdoConnectionUrlHook.java | 45 +
.../DummyRawStoreForJdoConnection.java | 1010 +++
.../hive/metastore/TestHiveMetastoreCli.java | 65 +
.../TestMetaStoreConnectionUrlHook.java | 47 +
.../hadoop/hive/metastore/TestObjectStore.java | 147 +
.../hive/metastore/conf/TestMetastoreConf.java | 13 +-
.../hive/metastore/utils/TestHdfsUtils.java | 152 +
52 files changed, 11245 insertions(+), 10656 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index ff09dd8..e0d9785 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -87,27 +87,6 @@ public final class FileUtils {
}
};
- public static final PathFilter SNAPSHOT_DIR_PATH_FILTER = new PathFilter() {
- @Override
- public boolean accept(Path p) {
- return ".snapshot".equalsIgnoreCase(p.getName());
- }
- };
-
- /**
- * Check if the path contains a subdirectory named '.snapshot'
- * @param p path to check
- * @param fs filesystem of the path
- * @return true if p contains a subdirectory named '.snapshot'
- * @throws IOException
- */
- public static boolean pathHasSnapshotSubDir(Path p, FileSystem fs) throws IOException {
- // Hadoop is missing a public API to check for snapshottable directories. Check with the directory name
- // until a more appropriate API is provided by HDFS-12257.
- final FileStatus[] statuses = fs.listStatus(p, FileUtils.SNAPSHOT_DIR_PATH_FILTER);
- return statuses != null && statuses.length != 0;
- }
-
/**
* Variant of Path.makeQualified that qualifies the input path against the default file system
* indicated by the configuration
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java
deleted file mode 100644
index 91a2888..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreConnectionUrlHook.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-/**
- * TestMetaStoreConnectionUrlHook
- * Verifies that when an instance of an implementation of RawStore is initialized, the connection
- * URL has already been updated by any metastore connect URL hooks.
- */
-public class TestMetaStoreConnectionUrlHook extends TestCase {
- private HiveConf hiveConf;
-
- @Override
- protected void setUp() throws Exception {
-
- super.setUp();
- }
-
- @Override
- protected void tearDown() throws Exception {
- super.tearDown();
- }
-
- public void testUrlHook() throws Exception {
- hiveConf = new HiveConf(this.getClass());
- hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLHOOK,
- DummyJdoConnectionUrlHook.class.getName());
- hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
- DummyJdoConnectionUrlHook.initialUrl);
- hiveConf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
- DummyRawStoreForJdoConnection.class.getName());
- hiveConf.setBoolean("hive.metastore.checkForDefaultDb", true);
- SessionState.start(new CliSessionState(hiveConf));
-
- // Instantiating the HMSHandler with hive.metastore.checkForDefaultDb will cause it to
- // initialize an instance of the DummyRawStoreForJdoConnection
- HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler(
- "test_metastore_connection_url_hook_hms_handler", hiveConf);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java
new file mode 100644
index 0000000..e654c02
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionExpressionProxyDefault.java
@@ -0,0 +1,40 @@
+package org.apache.hadoop.hive.metastore;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Make sure that when HiveMetaStore is instantiated, the proper default
+ * PartitionExpressionProxy instance is created.
+ */
+public class TestPartitionExpressionProxyDefault {
+
+ @Test
+ public void checkPartitionExpressionProxy() throws MetaException {
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler("for testing", conf, true);
+ Assert.assertEquals(PartitionExpressionForMetastore.class,
+ hms.getExpressionProxy().getClass());
+ }
+}
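
The default proxy asserted here can be swapped out through configuration; the ObjectStore tests later in this patch do exactly that. A minimal sketch reusing their pattern (MyPartitionExpressionProxy is a hypothetical implementation):

    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
        MyPartitionExpressionProxy.class.getName());
    HiveMetaStore.HMSHandler hms = new HiveMetaStore.HMSHandler("for testing", conf, true);
    // hms.getExpressionProxy() would now return the custom implementation.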
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
index 191d4a3..e783180 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
@@ -35,6 +35,7 @@ import java.util.Set;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
import org.apache.hive.service.cli.HiveSQLException;
import org.junit.After;
@@ -290,7 +291,7 @@ public class TestMetaStoreLimitPartitionRequest {
+ PARTITION_REQUEST_LIMIT);
} catch (HiveSQLException e) {
String exceedLimitMsg = String.format(HiveMetaStore.PARTITION_NUMBER_EXCEED_LIMIT_MSG, expectedPartitionNumber,
- TABLE_NAME, PARTITION_REQUEST_LIMIT, ConfVars.METASTORE_LIMIT_PARTITION_REQUEST.varname);
+ TABLE_NAME, PARTITION_REQUEST_LIMIT, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST.toString());
assertTrue(getWrongExceptionMessage(exceedLimitMsg, e.getMessage()),
e.getMessage().contains(exceedLimitMsg.toString()));
}
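
The limit itself is configured through the renamed key as well. A sketch of setting it on the new-style conf object (assuming MetastoreConf.setLongVar follows the same pattern as the setVar calls elsewhere in this patch):

    Configuration conf = MetastoreConf.newMetastoreConf();
    // replaces the old hive.metastore.limit.partition.request key
    MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST, 100);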
[4/7] hive git commit: HIVE-17967 Move HiveMetaStore class. This
closes #270 (Alan Gates, reviewed by Thejas Nair).
Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
deleted file mode 100644
index f581c7d..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.junit.Test;
-
-public class TestHiveMetastoreCli {
- private static final String[] CLI_ARGUMENTS = { "9999" };
-
- @Test
- public void testDefaultCliPortValue() {
- HiveConf configuration = new HiveConf();
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
- assert (cli.getPort() == HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT));
- }
-
- @Test
- public void testOverriddenCliPortValue() {
- HiveConf configuration = new HiveConf();
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
- cli.parse(TestHiveMetastoreCli.CLI_ARGUMENTS);
-
- assert (cli.getPort() == 9999);
- }
-
- @Test
- public void testOverriddenMetastoreServerPortValue() {
- HiveConf configuration = new HiveConf();
- HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
-
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
-
- assert (cli.getPort() == 12345);
- }
-
- @Test
- public void testCliOverridesConfiguration() {
- HiveConf configuration = new HiveConf();
- HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
-
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
- cli.parse(CLI_ARGUMENTS);
-
- assert (cli.getPort() == 9999);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java
deleted file mode 100644
index fa4e02a..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.messaging.EventMessage;
-import org.apache.hadoop.hive.metastore.model.MNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hive.metastore.TestOldSchema.dropAllStoreObjects;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-// Tests from TestObjectStore that can't be moved yet due to references to EventMessage. Once
-// EventMessage has been moved this should be recombined with TestObjectStore.
-
-public class TestObjectStore2 {
- private ObjectStore objectStore = null;
-
- public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
- @Override
- public String convertExprToFilter(byte[] expr) throws MetaException {
- return null;
- }
-
- @Override
- public boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
- byte[] expr, String defaultPartitionName, List<String> partitionNames)
- throws MetaException {
- return false;
- }
-
- @Override
- public FileMetadataExprType getMetadataType(String inputFormat) {
- return null;
- }
-
- @Override
- public SearchArgument createSarg(byte[] expr) {
- return null;
- }
-
- @Override
- public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
- return null;
- }
- }
-
- @Before
- public void setUp() throws Exception {
- Configuration conf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
-
- objectStore = new ObjectStore();
- objectStore.setConf(conf);
- dropAllStoreObjects(objectStore);
- }
-
- /**
- * Test notification operations
- */
- // TODO MS-SPLIT uncomment once we move EventMessage over
- @Test
- public void testNotificationOps() throws InterruptedException {
- final int NO_EVENT_ID = 0;
- final int FIRST_EVENT_ID = 1;
- final int SECOND_EVENT_ID = 2;
-
- NotificationEvent event =
- new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
- NotificationEventResponse eventResponse;
- CurrentNotificationEventId eventId;
-
- // Verify that there are no notifications available yet
- eventId = objectStore.getCurrentNotificationEventId();
- assertEquals(NO_EVENT_ID, eventId.getEventId());
-
- // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
- objectStore.addNotificationEvent(event);
- assertEquals(FIRST_EVENT_ID, event.getEventId());
- objectStore.addNotificationEvent(event);
- assertEquals(SECOND_EVENT_ID, event.getEventId());
-
- // Verify that objectStore fetches the latest notification event ID
- eventId = objectStore.getCurrentNotificationEventId();
- assertEquals(SECOND_EVENT_ID, eventId.getEventId());
-
- // Verify that getNextNotification() returns all events
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
- assertEquals(2, eventResponse.getEventsSize());
- assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
- assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
-
- // Verify that getNextNotification(last) returns events after a specified event
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
- assertEquals(1, eventResponse.getEventsSize());
- assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
-
- // Verify that getNextNotification(last) returns zero events if there are no more notifications available
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
- assertEquals(0, eventResponse.getEventsSize());
-
- // Verify that cleanNotificationEvents() cleans up all old notifications
- Thread.sleep(1);
- objectStore.cleanNotificationEvents(1);
- eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
- assertEquals(0, eventResponse.getEventsSize());
- }
-
- @Ignore(
- "This test is here to allow testing with other databases like mysql / postgres etc\n"
- + " with user changes to the code. This cannot be run on apache derby because of\n"
- + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
- )
- @Test
- public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException {
-
- final int NUM_THREADS = 10;
- CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
- () -> LoggerFactory.getLogger("test")
- .debug(NUM_THREADS + " threads going to add notification"));
-
- Configuration conf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
- /*
- Below are the properties that need to be set based on which database this test is going to be run against.
- */
-
-// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
-// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
-// "jdbc:mysql://localhost:3306/metastore_db");
-// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
-// conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
-
- /*
- We have to add this one manually: for tests the db is initialized via metastore directSql
- and we don't run the schema creation SQL that includes an insert for notification_sequence,
- which can be locked. The entry in notification_sequence happens via notification_event insertion.
- */
- objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
- objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
-
- objectStore.addNotificationEvent(
- new NotificationEvent(0, 0,
- EventMessage.EventType.CREATE_DATABASE.toString(),
- "CREATE DATABASE DB initial"));
-
- ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
- for (int i = 0; i < NUM_THREADS; i++) {
- final int n = i;
-
- executorService.execute(
- () -> {
- ObjectStore store = new ObjectStore();
- store.setConf(conf);
-
- String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
- NotificationEvent dbEvent =
- new NotificationEvent(0, 0, eventType,
- "CREATE DATABASE DB" + n);
- System.out.println("ADDING NOTIFICATION");
-
- try {
- cyclicBarrier.await();
- } catch (InterruptedException | BrokenBarrierException e) {
- throw new RuntimeException(e);
- }
- store.addNotificationEvent(dbEvent);
- System.out.println("FINISH NOTIFICATION");
- });
- }
- executorService.shutdown();
- assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
-
- // we have to set this up again as the underlying PMF keeps getting reinitialized with the
- // original reference closed
- ObjectStore store = new ObjectStore();
- store.setConf(conf);
-
- NotificationEventResponse eventResponse = store.getNextNotification(
- new NotificationEventRequest());
- assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
- long previousId = 0;
- for (NotificationEvent event : eventResponse.getEvents()) {
- assertTrue("previous:" + previousId + " current:" + event.getEventId(),
- previousId < event.getEventId());
- assertTrue(previousId + 1 == event.getEventId());
- previousId = event.getEventId();
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 149a9ad..52257c4 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hive.ql;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
@@ -37,7 +37,6 @@ import org.apache.hadoop.hive.ql.io.BucketCodec;
import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.junit.After;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
@@ -338,7 +337,7 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 5");
//make sure currently running txn is considered aborted by housekeeper
hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS);
- RunnableConfigurable houseKeeperService = new AcidHouseKeeperService();
+ MetastoreTaskThread houseKeeperService = new AcidHouseKeeperService();
houseKeeperService.setConf(hiveConf);
//this will abort the txn
houseKeeperService.run();
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 2b72664..d309e3d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -19,13 +19,12 @@ package org.apache.hadoop.hive.ql.lockmgr;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.TestTxnCommands2;
import org.junit.After;
import org.junit.Assert;
import org.apache.hadoop.hive.common.FileUtils;
@@ -1123,7 +1122,7 @@ public class TestDbTxnManager2 {
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks);
txnMgr.commitTxn();
- RunnableConfigurable writeSetService = new AcidWriteSetService();
+ MetastoreTaskThread writeSetService = new AcidWriteSetService();
writeSetService.setConf(conf);
writeSetService.run();
Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out b/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out
index 53b6b46..95da219 100644
--- a/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out
+++ b/ql/src/test/results/clientnegative/add_partition_with_whitelist.q.out
@@ -15,4 +15,4 @@ POSTHOOK: Input: default@part_whitelist_test
PREHOOK: query: ALTER TABLE part_whitelist_test ADD PARTITION (ds='1,2,3,4')
PREHOOK: type: ALTERTABLE_ADDPARTS
PREHOOK: Output: default@part_whitelist_test
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with hive.metastore.partition.name.whitelist.pattern))
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Partition value '1,2,3,4' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern))
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
index ad0cd8b..ae265eb 100644
--- a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
+++ b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out
@@ -23,4 +23,4 @@ PREHOOK: query: ALTER TABLE part_whitelist_test PARTITION (ds='1') rename to par
PREHOOK: type: ALTERTABLE_RENAMEPART
PREHOOK: Input: default@part_whitelist_test
PREHOOK: Output: default@part_whitelist_test@ds=1
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with hive.metastore.partition.name.whitelist.pattern)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. (configure with metastore.partition.name.whitelist.pattern)
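
Both expected outputs now name the renamed key. Setting the whitelist programmatically would look roughly like this (a sketch; the value mirrors the pattern printed in the messages above, i.e. printable ASCII except comma):

    Configuration conf = MetastoreConf.newMetastoreConf();
    conf.set("metastore.partition.name.whitelist.pattern", "[\\x20-\\x7E&&[^,]]*");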
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 8d531b2..1701182 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -192,7 +192,7 @@
</exclusion>
</exclusions>
</dependency>
- <dependency>
+ <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
<version>${hadoop.version}</version>
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java
new file mode 100644
index 0000000..ec543be
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultPartitionExpressionProxy.java
@@ -0,0 +1,57 @@
+package org.apache.hadoop.hive.metastore;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+
+import java.util.List;
+
+/**
+ * Default implementation of PartitionExpressionProxy. Eventually this should use the SARGs in
+ * Hive's storage-api. For now it just throws UnsupportedOperationException.
+ */
+public class DefaultPartitionExpressionProxy implements PartitionExpressionProxy {
+ @Override
+ public String convertExprToFilter(byte[] expr) throws MetaException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean filterPartitionsByExpr(List<FieldSchema> partColumns, byte[] expr, String
+ defaultPartitionName, List<String> partitionNames) throws MetaException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FileMetadataExprType getMetadataType(String inputFormat) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchArgument createSarg(byte[] expr) {
+ throw new UnsupportedOperationException();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java
new file mode 100644
index 0000000..1dbfa42
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultStorageSchemaReader.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.List;
+
+/**
+ * Default StorageSchemaReader. This just throws as the metastore currently doesn't know how to
+ * read schemas from storage.
+ */
+public class DefaultStorageSchemaReader implements StorageSchemaReader {
+ @Override
+ public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext,
+ Configuration conf) throws MetaException {
+ throw new UnsupportedOperationException("Storage schema reading not supported");
+ }
+}
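
Any replacement reader only has to implement this one method. For example, a reader that simply trusts the columns already recorded in the table's storage descriptor could be sketched as follows (a hypothetical class, not part of this patch; imports as in DefaultStorageSchemaReader above):

    public class StorageDescriptorSchemaReader implements StorageSchemaReader {
      @Override
      public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext,
                                          Configuration conf) throws MetaException {
        // Fall back to the schema the metastore already has on file.
        return tbl.getSd().getCols();
      }
    }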
[6/7] hive git commit: HIVE-17967 Move HiveMetaStore class. This
closes #270 (Alan Gates, reviewed by Thejas Nair).
Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
deleted file mode 100644
index c22ac9e..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ /dev/null
@@ -1,8165 +0,0 @@
-/** * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import static org.apache.commons.lang.StringUtils.join;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
-
-import com.google.common.collect.Sets;
-import org.apache.hadoop.hive.metastore.model.MWMPool;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedExceptionAction;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Set;
-import java.util.Timer;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.regex.Pattern;
-
-import javax.jdo.JDOException;
-
-import com.codahale.metrics.Counter;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableListMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Multimaps;
-
-import org.apache.commons.cli.OptionBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.common.LogUtils;
-import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.auth.HiveAuthUtils;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.common.cli.CommonCliOptions;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.io.HdfsUtils;
-import org.apache.hadoop.hive.metastore.api.*;
-import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
-import org.apache.hadoop.hive.metastore.cache.CachedStore;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
-import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
-import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
-import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
-import org.apache.hadoop.hive.metastore.events.InsertEvent;
-import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
-import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
-import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreEventContext;
-import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
-import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
-import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
-import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
-import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
-import org.apache.hadoop.hive.metastore.metrics.Metrics;
-import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
-import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.hadoop.hive.metastore.repl.DumpDirCleanerTask;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
-import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
-import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
-import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
-import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService;
-import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.serde2.Deserializer;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.shims.Utils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hive.common.util.HiveStringUtils;
-import org.apache.hive.common.util.ShutdownHookManager;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.core.LoggerContext;
-import org.apache.thrift.TException;
-import org.apache.thrift.TProcessor;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.server.ServerContext;
-import org.apache.thrift.server.TServer;
-import org.apache.thrift.server.TServerEventHandler;
-import org.apache.thrift.server.TThreadPoolServer;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-
-import com.facebook.fb303.FacebookBase;
-import com.facebook.fb303.fb_status;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Splitter;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * TODO:pc move application logic to a separate interface.
- */
-public class HiveMetaStore extends ThriftHiveMetastore {
- public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStore.class);
- public static final String PARTITION_NUMBER_EXCEED_LIMIT_MSG =
- "Number of partitions scanned (=%d) on table '%s' exceeds limit (=%d). This is controlled on the metastore server by %s.";
-
- // boolean that tells if the HiveMetaStore (remote) server is being used.
- // Can be used to determine if the calls to metastore api (HMSHandler) are being made with
- // embedded metastore or a remote one
- private static boolean isMetaStoreRemote = false;
-
- // Used for testing to simulate method timeout.
- @VisibleForTesting
- static boolean TEST_TIMEOUT_ENABLED = false;
- @VisibleForTesting
- static long TEST_TIMEOUT_VALUE = -1;
-
- public static final String ADMIN = "admin";
- public static final String PUBLIC = "public";
- /** MM write states. */
- public static final char MM_WRITE_OPEN = 'o', MM_WRITE_COMMITTED = 'c', MM_WRITE_ABORTED = 'a';
-
- private static HadoopThriftAuthBridge.Server saslServer;
- private static MetastoreDelegationTokenManager delegationTokenManager;
- private static boolean useSasl;
-
- public static final String NO_FILTER_STRING = "";
- public static final int UNLIMITED_MAX_PARTITIONS = -1;
-
- private static final class ChainedTTransportFactory extends TTransportFactory {
- private final TTransportFactory parentTransFactory;
- private final TTransportFactory childTransFactory;
-
- private ChainedTTransportFactory(
- TTransportFactory parentTransFactory,
- TTransportFactory childTransFactory) {
- this.parentTransFactory = parentTransFactory;
- this.childTransFactory = childTransFactory;
- }
-
- @Override
- public TTransport getTransport(TTransport trans) {
- return childTransFactory.getTransport(parentTransFactory.getTransport(trans));
- }
- }
-
- public static class HMSHandler extends FacebookBase implements IHMSHandler {
- public static final Logger LOG = HiveMetaStore.LOG;
- private final HiveConf hiveConf; // stores datastore (jpox) properties,
- // right now they come from jpox.properties
-
- private static String currentUrl;
- private FileMetadataManager fileMetadataManager;
- private PartitionExpressionProxy expressionProxy;
-
- // Variables for metrics
- // Package visible so that HMSMetricsListener can see them.
- static AtomicInteger databaseCount, tableCount, partCount;
-
- private Warehouse wh; // hdfs warehouse
- private static final ThreadLocal<RawStore> threadLocalMS =
- new ThreadLocal<RawStore>() {
- @Override
- protected RawStore initialValue() {
- return null;
- }
- };
-
- private static final ThreadLocal<TxnStore> threadLocalTxn = new ThreadLocal<TxnStore>() {
- @Override
- protected TxnStore initialValue() {
- return null;
- }
- };
-
- private static final ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>> timerContexts =
- new ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>>() {
- @Override
- protected Map<String, com.codahale.metrics.Timer.Context> initialValue() {
- return new HashMap<>();
- }
- };
-
- public static RawStore getRawStore() {
- return threadLocalMS.get();
- }
-
- public static void removeRawStore() {
- threadLocalMS.remove();
- }
-
- // Thread local configuration is needed as many threads could make changes
- // to the conf using the connection hook
- private static final ThreadLocal<Configuration> threadLocalConf =
- new ThreadLocal<Configuration>() {
- @Override
- protected Configuration initialValue() {
- return null;
- }
- };
-
- /**
- * Thread local HMSHandler used during shutdown to notify meta listeners
- */
- private static final ThreadLocal<HMSHandler> threadLocalHMSHandler = new ThreadLocal<>();
-
- /**
- * Thread local Map to keep track of modified meta conf keys
- */
- private static final ThreadLocal<Map<String, String>> threadLocalModifiedConfig =
- new ThreadLocal<>();
-
- private static ExecutorService threadPool;
-
- public static final Logger auditLog = LoggerFactory.getLogger(
- HiveMetaStore.class.getName() + ".audit");
-
- private static final void logAuditEvent(String cmd) {
- if (cmd == null) {
- return;
- }
-
- UserGroupInformation ugi;
- try {
- ugi = Utils.getUGI();
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
-
- String address = getIPAddress();
- if (address == null) {
- address = "unknown-ip-addr";
- }
-
- auditLog.info("ugi={} ip={} cmd={} ", ugi.getUserName(), address, cmd);
- }
-
- private static String getIPAddress() {
- if (useSasl) {
- if (saslServer != null && saslServer.getRemoteAddress() != null) {
- return saslServer.getRemoteAddress().getHostAddress();
- }
- } else {
- // if kerberos is not enabled
- return getThreadLocalIpAddress();
- }
- return null;
- }
-
- private static int nextSerialNum = 0;
- private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
- @Override
- protected Integer initialValue() {
- return new Integer(nextSerialNum++);
- }
- };
-
- // This will only be set if the metastore is being accessed from a metastore Thrift server,
- // not if it is from the CLI. Also, only if the TTransport being used to connect is an
- // instance of TSocket. This is also not set when kerberos is used.
- private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() {
- @Override
- protected String initialValue() {
- return null;
- }
- };
-
- /**
- * Internal function to notify listeners for meta config change events
- */
- private void notifyMetaListeners(String key, String oldValue, String newValue) throws MetaException {
- for (MetaStoreEventListener listener : listeners) {
- listener.onConfigChange(new ConfigChangeEvent(this, key, oldValue, newValue));
- }
-
- if (transactionalListeners.size() > 0) {
- // All the fields of this event are final, so no reason to create a new one for each
- // listener
- ConfigChangeEvent cce = new ConfigChangeEvent(this, key, oldValue, newValue);
- for (MetaStoreEventListener transactionalListener : transactionalListeners) {
- transactionalListener.onConfigChange(cce);
- }
- }
- }
-
- /**
- * Internal function to notify listeners to revert to old values of keys
- * that were modified during setMetaConf. This would get called from HiveMetaStore#cleanupRawStore
- */
- private void notifyMetaListenersOnShutDown() {
- Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
- if (modifiedConf == null) {
- // Nothing got modified
- return;
- }
- try {
- Configuration conf = threadLocalConf.get();
- if (conf == null) {
- throw new MetaException("Unexpected: modifiedConf is non-null but conf is null");
- }
- // Notify listeners of the changed value
- for (Entry<String, String> entry : modifiedConf.entrySet()) {
- String key = entry.getKey();
- // curr value becomes old and vice-versa
- String currVal = entry.getValue();
- String oldVal = conf.get(key);
- if (!Objects.equals(oldVal, currVal)) {
- notifyMetaListeners(key, oldVal, currVal);
- }
- }
- logInfo("Meta listeners shutdown notification completed.");
- } catch (MetaException e) {
- LOG.error("Failed to notify meta listeners on shutdown: ", e);
- }
- }
-
- public static void setThreadLocalIpAddress(String ipAddress) {
- threadLocalIpAddress.set(ipAddress);
- }
-
- // This will return null if the metastore is not being accessed from a metastore Thrift server,
- // or if the TTransport being used to connect is not an instance of TSocket, or if kerberos
- // is used
- public static String getThreadLocalIpAddress() {
- return threadLocalIpAddress.get();
- }
-
- /**
- * Use {@link #getThreadId()} instead.
- * @return thread id
- */
- @Deprecated
- public static Integer get() {
- return threadLocalId.get();
- }
-
- @Override
- public int getThreadId() {
- return threadLocalId.get();
- }
-
- public HMSHandler(String name) throws MetaException {
- this(name, new HiveConf(HMSHandler.class), true);
- }
-
- public HMSHandler(String name, HiveConf conf) throws MetaException {
- this(name, conf, true);
- }
-
- public HMSHandler(String name, HiveConf conf, boolean init) throws MetaException {
- super(name);
- hiveConf = conf;
- isInTest = HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_IN_TEST);
- if (threadPool == null) {
- synchronized (HMSHandler.class) {
- if (threadPool == null) {
- int numThreads = HiveConf.getIntVar(conf,
- ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT);
- threadPool = Executors.newFixedThreadPool(numThreads,
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("HMSHandler #%d").build());
- }
- }
- }
- if (init) {
- init();
- }
- }
-
- public HiveConf getHiveConf() {
- return hiveConf;
- }
-
- private ClassLoader classLoader;
- private AlterHandler alterHandler;
- private List<MetaStorePreEventListener> preListeners;
- private List<MetaStoreEventListener> listeners;
- private List<TransactionalMetaStoreEventListener> transactionalListeners;
- private List<MetaStoreEndFunctionListener> endFunctionListeners;
- private List<MetaStoreInitListener> initListeners;
- private Pattern partitionValidationPattern;
- private final boolean isInTest;
-
- {
- classLoader = Thread.currentThread().getContextClassLoader();
- if (classLoader == null) {
- classLoader = Configuration.class.getClassLoader();
- }
- }
-
- @Override
- public List<TransactionalMetaStoreEventListener> getTransactionalListeners() {
- return transactionalListeners;
- }
-
- @Override
- public void init() throws MetaException {
- initListeners = MetaStoreUtils.getMetaStoreListeners(
- MetaStoreInitListener.class, hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_INIT_HOOKS));
- for (MetaStoreInitListener singleInitListener: initListeners) {
- MetaStoreInitContext context = new MetaStoreInitContext();
- singleInitListener.onInit(context);
- }
-
- String alterHandlerName = hiveConf.get("hive.metastore.alter.impl",
- HiveAlterHandler.class.getName());
- alterHandler = (AlterHandler) ReflectionUtils.newInstance(MetaStoreUtils.getClass(
- alterHandlerName), hiveConf);
- wh = new Warehouse(hiveConf);
-
- synchronized (HMSHandler.class) {
- if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(hiveConf))) {
- createDefaultDB();
- createDefaultRoles();
- addAdminUsers();
- currentUrl = MetaStoreInit.getConnectionURL(hiveConf);
- }
- }
-
- //Start Metrics
- if (hiveConf.getBoolVar(ConfVars.METASTORE_METRICS)) {
- LOG.info("Begin calculating metadata count metrics.");
- Metrics.initialize(hiveConf);
- databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES);
- tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES);
- partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS);
- updateMetrics();
-
- }
-
- preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
- hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS));
- preListeners.add(0, new TransactionalValidationListener(hiveConf));
- listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS));
- listeners.add(new SessionPropertiesListener(hiveConf));
- listeners.add(new AcidEventListener(hiveConf));
- transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,hiveConf,
- hiveConf.getVar(ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS));
- if (Metrics.getRegistry() != null) {
- listeners.add(new HMSMetricsListener(hiveConf));
- }
-
- endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
- MetaStoreEndFunctionListener.class, hiveConf,
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS));
-
- String partitionValidationRegex =
- hiveConf.getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN);
- if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
- partitionValidationPattern = Pattern.compile(partitionValidationRegex);
- } else {
- partitionValidationPattern = null;
- }
-
- long cleanFreq = hiveConf.getTimeVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS);
- if (cleanFreq > 0) {
- // With the default config (frequency 0) this branch is skipped and no timer is created.
- Timer cleaner = new Timer("Metastore Events Cleaner Thread", true);
- cleaner.schedule(new EventCleanerTask(this), cleanFreq, cleanFreq);
- }
-
- cleanFreq = hiveConf.getTimeVar(ConfVars.REPL_DUMPDIR_CLEAN_FREQ, TimeUnit.MILLISECONDS);
- if (cleanFreq > 0) {
- // With the default config (frequency 0) this branch is skipped and no timer is created.
- Timer cleaner = new Timer("Repl Dump Dir Cleaner Thread", true);
- cleaner.schedule(new DumpDirCleanerTask(hiveConf), cleanFreq, cleanFreq);
- }
- expressionProxy = PartFilterExprUtil.createExpressionProxy(hiveConf);
- fileMetadataManager = new FileMetadataManager(this.getMS(), hiveConf);
- }
-
- private static String addPrefix(String s) {
- return threadLocalId.get() + ": " + s;
- }
-
- /**
- * Record the invoking HMSHandler on the thread local, if one is not already set.
- */
- private static void setHMSHandler(HMSHandler handler) {
- if (threadLocalHMSHandler.get() == null) {
- threadLocalHMSHandler.set(handler);
- }
- }
- @Override
- public void setConf(Configuration conf) {
- threadLocalConf.set(conf);
- RawStore ms = threadLocalMS.get();
- if (ms != null) {
- ms.setConf(conf); // reload if DS related configuration is changed
- }
- }
-
- @Override
- public Configuration getConf() {
- Configuration conf = threadLocalConf.get();
- if (conf == null) {
- conf = new Configuration(hiveConf);
- threadLocalConf.set(conf);
- }
- return conf;
- }
-
- private Map<String, String> getModifiedConf() {
- Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
- if (modifiedConf == null) {
- modifiedConf = new HashMap<String, String>();
- threadLocalModifiedConfig.set(modifiedConf);
- }
- return modifiedConf;
- }
-
- @Override
- public Warehouse getWh() {
- return wh;
- }
-
- @Override
- public void setMetaConf(String key, String value) throws MetaException {
- ConfVars confVar = HiveConf.getMetaConf(key);
- if (confVar == null) {
- throw new MetaException("Invalid configuration key " + key);
- }
- String validate = confVar.validate(value);
- if (validate != null) {
- throw new MetaException("Invalid configuration value " + value + " for key " + key +
- " by " + validate);
- }
- Configuration configuration = getConf();
- String oldValue = configuration.get(key);
- // Save prev val of the key on threadLocal
- Map<String, String> modifiedConf = getModifiedConf();
- if (!modifiedConf.containsKey(key)) {
- modifiedConf.put(key, oldValue);
- }
- // Record the invoking HMSHandler on the threadLocal; it is used later to notify
- // metaListeners in HiveMetaStore#cleanupRawStore
- setHMSHandler(this);
- configuration.set(key, value);
- notifyMetaListeners(key, oldValue, value);
- }
-
- @Override
- public String getMetaConf(String key) throws MetaException {
- ConfVars confVar = HiveConf.getMetaConf(key);
- if (confVar == null) {
- throw new MetaException("Invalid configuration key " + key);
- }
- return getConf().get(key, confVar.getDefaultValue());
- }
-
- /**
- * Get a cached RawStore.
- *
- * @return the cached RawStore
- * @throws MetaException
- */
- @InterfaceAudience.LimitedPrivate({"HCATALOG"})
- @InterfaceStability.Evolving
- @Override
- public RawStore getMS() throws MetaException {
- Configuration conf = getConf();
- return getMSForConf(conf);
- }
-
- public static RawStore getMSForConf(Configuration conf) throws MetaException {
- RawStore ms = threadLocalMS.get();
- if (ms == null) {
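- // First use on this thread: create the RawStore, verify schema compatibility, and
- // cache the instance thread-locally.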
- ms = newRawStoreForConf(conf);
- ms.verifySchema();
- threadLocalMS.set(ms);
- ms = threadLocalMS.get();
- }
- return ms;
- }
-
- private TxnStore getTxnHandler() {
- TxnStore txn = threadLocalTxn.get();
- if (txn == null) {
- txn = TxnUtils.getTxnStore(hiveConf);
- threadLocalTxn.set(txn);
- }
- return txn;
- }
-
- private static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
- HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
- String rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL);
- LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName));
- if (hiveConf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
- LOG.info("Fastpath, skipping raw store proxy");
- try {
- RawStore rs =
- ((Class<? extends RawStore>) MetaStoreUtils.getClass(rawStoreClassName))
- .newInstance();
- rs.setConf(hiveConf);
- return rs;
- } catch (Exception e) {
- LOG.error("Unable to instantiate raw store directly in fastpath mode", e);
- throw new RuntimeException(e);
- }
- }
- return RawStoreProxy.getProxy(hiveConf, conf, rawStoreClassName, threadLocalId.get());
- }
-
- private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
- try {
- ms.getDatabase(DEFAULT_DATABASE_NAME);
- } catch (NoSuchObjectException e) {
- Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
- wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null);
- db.setOwnerName(PUBLIC);
- db.setOwnerType(PrincipalType.ROLE);
- ms.createDatabase(db);
- }
- }
-
- /**
- * create default database if it doesn't exist.
- *
- * This can race when HiveServer2 (with an embedded metastore) and a Metastore Server
- * invoke createDefaultDB concurrently. If the first attempt fails with a JDOException,
- * a warning is logged and the call is retried once, on the assumption that the other
- * party has already created the database.
- *
- * @throws MetaException
- */
- private void createDefaultDB() throws MetaException {
- try {
- createDefaultDB_core(getMS());
- } catch (JDOException e) {
- LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
- try {
- createDefaultDB_core(getMS());
- } catch (InvalidObjectException e1) {
- throw new MetaException(e1.getMessage());
- }
- } catch (InvalidObjectException e) {
- throw new MetaException(e.getMessage());
- }
- }
-
- /**
- * create default roles if they don't exist.
- *
- * This can race when HiveServer2 (with an embedded metastore) and a Metastore Server
- * invoke createDefaultRoles concurrently. If the first attempt fails with a JDOException,
- * a warning is logged and the call is retried once, on the assumption that the other
- * party has already created the roles.
- *
- * @throws MetaException
- */
- private void createDefaultRoles() throws MetaException {
- try {
- createDefaultRoles_core();
- } catch (JDOException e) {
- LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e);
- createDefaultRoles_core();
- }
- }
-
- private void createDefaultRoles_core() throws MetaException {
-
- RawStore ms = getMS();
- try {
- ms.addRole(ADMIN, ADMIN);
- } catch (InvalidObjectException e) {
- LOG.debug(ADMIN +" role already exists",e);
- } catch (NoSuchObjectException e) {
- // This should never be thrown.
- LOG.warn("Unexpected exception while adding " +ADMIN+" roles" , e);
- }
- LOG.info("Added "+ ADMIN+ " role in metastore");
- try {
- ms.addRole(PUBLIC, PUBLIC);
- } catch (InvalidObjectException e) {
- LOG.debug(PUBLIC + " role already exists",e);
- } catch (NoSuchObjectException e) {
- // This should never be thrown.
- LOG.warn("Unexpected exception while adding "+PUBLIC +" roles" , e);
- }
- LOG.info("Added "+PUBLIC+ " role in metastore");
- // now grant all privs to admin
- PrivilegeBag privs = new PrivilegeBag();
- privs.addToPrivileges(new HiveObjectPrivilege( new HiveObjectRef(HiveObjectType.GLOBAL, null,
- null, null, null), ADMIN, PrincipalType.ROLE, new PrivilegeGrantInfo("All", 0, ADMIN,
- PrincipalType.ROLE, true)));
- try {
- ms.grantPrivileges(privs);
- } catch (InvalidObjectException e) {
- // Surprisingly these privs are already granted.
- LOG.debug("Failed while granting global privs to admin", e);
- } catch (NoSuchObjectException e) {
- // Unlikely to be thrown.
- LOG.warn("Failed while granting global privs to admin", e);
- }
- }
-
- /**
- * add admin users if they don't exist.
- *
- * This can race when HiveServer2 (with an embedded metastore) and a Metastore Server
- * invoke addAdminUsers concurrently. If the first attempt fails with a JDOException, a
- * warning is logged and the call is retried once, on the assumption that the other party
- * has already added the users.
- *
- * @throws MetaException
- */
- private void addAdminUsers() throws MetaException {
- try {
- addAdminUsers_core();
- } catch (JDOException e) {
- LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e);
- addAdminUsers_core();
- }
- }
-
- private void addAdminUsers_core() throws MetaException {
-
- // now add pre-configured users to admin role
- String userStr = HiveConf.getVar(hiveConf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
- if (userStr.isEmpty()) {
- LOG.info("No user is added in admin role, since config is empty");
- return;
- }
- // Since user names need to be valid unix user names, per IEEE Std 1003.1-2001 they cannot
- // contain a comma, so we can safely split the string above on commas.
-
- Iterator<String> users = Splitter.on(",").trimResults().omitEmptyStrings().split(userStr).iterator();
- if (!users.hasNext()) {
- LOG.info("No user is added in admin role, since config value "+ userStr +
- " is in incorrect format. We accept comma separated list of users.");
- return;
- }
- Role adminRole;
- RawStore ms = getMS();
- try {
- adminRole = ms.getRole(ADMIN);
- } catch (NoSuchObjectException e) {
- LOG.error("Failed to retrieve just added admin role",e);
- return;
- }
- while (users.hasNext()) {
- String userName = users.next();
- try {
- ms.grantRole(adminRole, userName, PrincipalType.USER, ADMIN, PrincipalType.ROLE, true);
- LOG.info("Added " + userName + " to admin role");
- } catch (NoSuchObjectException e) {
- LOG.error("Failed to add "+ userName + " in admin role",e);
- } catch (InvalidObjectException e) {
- LOG.debug(userName + " already in admin role", e);
- }
- }
- }
-
- private static void logInfo(String m) {
- LOG.info(threadLocalId.get().toString() + ": " + m);
- logAuditEvent(m);
- }
-
- private String startFunction(String function, String extraLogInfo) {
- incrementCounter(function);
- logInfo((getThreadLocalIpAddress() == null ? "" : "source:" + getThreadLocalIpAddress() + " ") +
- function + extraLogInfo);
- com.codahale.metrics.Timer timer =
- Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + function);
- if (timer != null) {
- // Timer will be null if we aren't using the metrics
- timerContexts.get().put(function, timer.time());
- }
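- // Track the number of in-flight calls per API; the counter is decremented in endFunction.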
- Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
- if (counter != null) counter.inc();
- return function;
- }
-
- private String startFunction(String function) {
- return startFunction(function, "");
- }
-
- private String startTableFunction(String function, String db, String tbl) {
- return startFunction(function, " : db=" + db + " tbl=" + tbl);
- }
-
- private String startMultiTableFunction(String function, String db, List<String> tbls) {
- String tableNames = join(tbls, ",");
- return startFunction(function, " : db=" + db + " tbls=" + tableNames);
- }
-
- private String startPartitionFunction(String function, String db, String tbl,
- List<String> partVals) {
- return startFunction(function, " : db=" + db + " tbl=" + tbl
- + "[" + join(partVals, ",") + "]");
- }
-
- private String startPartitionFunction(String function, String db, String tbl,
- Map<String, String> partName) {
- return startFunction(function, " : db=" + db + " tbl=" + tbl + " partition=" + partName);
- }
-
- private void endFunction(String function, boolean successful, Exception e) {
- endFunction(function, successful, e, null);
- }
- private void endFunction(String function, boolean successful, Exception e,
- String inputTableName) {
- endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName));
- }
-
- private void endFunction(String function, MetaStoreEndFunctionContext context) {
- com.codahale.metrics.Timer.Context timerContext = timerContexts.get().remove(function);
- if (timerContext != null) {
- timerContext.close();
- }
- Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
- if (counter != null) counter.dec();
-
- for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
- listener.onEndFunction(function, context);
- }
- }
-
- @Override
- public fb_status getStatus() {
- return fb_status.ALIVE;
- }
-
- @Override
- public void shutdown() {
- cleanupRawStore();
- PerfLogger.getPerfLogger(false).cleanupPerfLogMetrics();
- }
-
- @Override
- public AbstractMap<String, Long> getCounters() {
- AbstractMap<String, Long> counters = super.getCounters();
-
- // Allow endFunctionListeners to add any counters they have collected
- if (endFunctionListeners != null) {
- for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
- listener.exportCounters(counters);
- }
- }
-
- return counters;
- }
-
- private void create_database_core(RawStore ms, final Database db)
- throws AlreadyExistsException, InvalidObjectException, MetaException {
- if (!validateName(db.getName(), null)) {
- throw new InvalidObjectException(db.getName() + " is not a valid database name");
- }
-
- if (null == db.getLocationUri()) {
- db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString());
- } else {
- db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString());
- }
-
- Path dbPath = new Path(db.getLocationUri());
- boolean success = false;
- boolean madeDir = false;
- Map<String, String> transactionalListenersResponses = Collections.emptyMap();
- try {
- firePreEvent(new PreCreateDatabaseEvent(db, this));
- if (!wh.isDir(dbPath)) {
- if (!wh.mkdirs(dbPath)) {
- throw new MetaException("Unable to create database path " + dbPath +
- ", failed to create database " + db.getName());
- }
- madeDir = true;
- }
-
- ms.openTransaction();
- ms.createDatabase(db);
-
- if (!transactionalListeners.isEmpty()) {
- transactionalListenersResponses =
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventType.CREATE_DATABASE,
- new CreateDatabaseEvent(db, true, this));
- }
-
- success = ms.commitTransaction();
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- if (madeDir) {
- wh.deleteDir(dbPath, true);
- }
- }
-
- if (!listeners.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners,
- EventType.CREATE_DATABASE,
- new CreateDatabaseEvent(db, success, this),
- null,
- transactionalListenersResponses, ms);
- }
- }
- }
-
- @Override
- public void create_database(final Database db)
- throws AlreadyExistsException, InvalidObjectException, MetaException {
- startFunction("create_database", ": " + db.toString());
- boolean success = false;
- Exception ex = null;
- try {
- try {
- if (null != get_database_core(db.getName())) {
- throw new AlreadyExistsException("Database " + db.getName() + " already exists");
- }
- } catch (NoSuchObjectException e) {
- // expected
- }
-
- if (TEST_TIMEOUT_ENABLED) {
- try {
- Thread.sleep(TEST_TIMEOUT_VALUE);
- } catch (InterruptedException e) {
- // do nothing
- }
- Deadline.checkTimeout();
- }
- create_database_core(getMS(), db);
- success = true;
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else if (e instanceof AlreadyExistsException) {
- throw (AlreadyExistsException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- endFunction("create_database", success, ex);
- }
- }
-
- @Override
- public Database get_database(final String name) throws NoSuchObjectException, MetaException {
- startFunction("get_database", ": " + name);
- Database db = null;
- Exception ex = null;
- try {
- db = get_database_core(name);
- firePreEvent(new PreReadDatabaseEvent(db, this));
- } catch (MetaException e) {
- ex = e;
- throw e;
- } catch (NoSuchObjectException e) {
- ex = e;
- throw e;
- } finally {
- endFunction("get_database", db != null, ex);
- }
- return db;
- }
-
- @Override
- public Database get_database_core(final String name) throws NoSuchObjectException,
- MetaException {
- Database db = null;
- try {
- db = getMS().getDatabase(name);
- } catch (MetaException e) {
- throw e;
- } catch (NoSuchObjectException e) {
- throw e;
- } catch (Exception e) {
- assert (e instanceof RuntimeException);
- throw (RuntimeException) e;
- }
- return db;
- }
-
- @Override
- public void alter_database(final String dbName, final Database newDB)
- throws NoSuchObjectException, TException, MetaException {
- startFunction("alter_database" + dbName);
- boolean success = false;
- Exception ex = null;
-
- // Perform the same URI normalization as create_database_core.
- if (newDB.getLocationUri() != null) {
- newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString());
- }
-
- try {
- Database oldDB = get_database_core(dbName);
- if (oldDB == null) {
- throw new MetaException("Could not alter database \"" + dbName + "\". Could not retrieve old definition.");
- }
- firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this));
- getMS().alterDatabase(dbName, newDB);
- success = true;
- } catch (Exception e) {
- ex = e;
- rethrowException(e);
- } finally {
- endFunction("alter_database", success, ex);
- }
- }
-
- private void drop_database_core(RawStore ms,
- final String name, final boolean deleteData, final boolean cascade)
- throws NoSuchObjectException, InvalidOperationException, MetaException,
- IOException, InvalidObjectException, InvalidInputException {
- boolean success = false;
- Database db = null;
- List<Path> tablePaths = new ArrayList<Path>();
- List<Path> partitionPaths = new ArrayList<Path>();
- Map<String, String> transactionalListenerResponses = Collections.emptyMap();
- try {
- ms.openTransaction();
- db = ms.getDatabase(name);
-
- firePreEvent(new PreDropDatabaseEvent(db, this));
-
- List<String> allTables = get_all_tables(db.getName());
- List<String> allFunctions = get_functions(db.getName(), "*");
-
- if (!cascade) {
- if (!allTables.isEmpty()) {
- throw new InvalidOperationException(
- "Database " + db.getName() + " is not empty. One or more tables exist.");
- }
- if (!allFunctions.isEmpty()) {
- throw new InvalidOperationException(
- "Database " + db.getName() + " is not empty. One or more functions exist.");
- }
- }
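- // The parent of the database location must be writable, or the drop is aborted before
- // any metadata is touched.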
- Path path = new Path(db.getLocationUri()).getParent();
- if (!wh.isWritable(path)) {
- throw new MetaException("Database not dropped since " +
- path + " is not writable by " +
- hiveConf.getUser());
- }
-
- Path databasePath = wh.getDnsPath(wh.getDatabasePath(db));
-
- // drop any functions before dropping db
- for (String funcName : allFunctions) {
- drop_function(name, funcName);
- }
-
- // drop tables before dropping db
- int tableBatchSize = HiveConf.getIntVar(hiveConf,
- ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
-
- int startIndex = 0;
- // retrieve the tables from the metastore in batches to alleviate memory constraints
- while (startIndex < allTables.size()) {
- int endIndex = Math.min(startIndex + tableBatchSize, allTables.size());
-
- List<Table> tables = null;
- try {
- tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex));
- } catch (UnknownDBException e) {
- throw new MetaException(e.getMessage());
- }
-
- if (tables != null && !tables.isEmpty()) {
- for (Table table : tables) {
-
- // If the table is not external and may not be in a subdirectory of the database path,
- // add its location to the list of paths to delete
- Path tablePath = null;
- if (table.getSd().getLocation() != null && !isExternal(table)) {
- tablePath = wh.getDnsPath(new Path(table.getSd().getLocation()));
- if (!wh.isWritable(tablePath.getParent())) {
- throw new MetaException("Database metadata not deleted since table: " +
- table.getTableName() + " has a parent location " + tablePath.getParent() +
- " which is not writable by " + hiveConf.getUser());
- }
-
- if (!isSubdirectory(databasePath, tablePath)) {
- tablePaths.add(tablePath);
- }
- }
-
- // For each partition in each table, drop the partitions and get a list of
- // partitions' locations which might need to be deleted
- partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(),
- tablePath, table.getPartitionKeys(), deleteData && !isExternal(table));
-
- // Drop the table but not its data
- drop_table(name, table.getTableName(), false);
- }
-
- startIndex = endIndex;
- }
- }
-
- if (ms.dropDatabase(name)) {
- if (!transactionalListeners.isEmpty()) {
- transactionalListenerResponses =
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventType.DROP_DATABASE,
- new DropDatabaseEvent(db, true, this));
- }
-
- success = ms.commitTransaction();
- }
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- } else if (deleteData) {
- // Delete the data in the partitions which have other locations
- deletePartitionData(partitionPaths);
- // Delete the data in the tables which have other locations
- for (Path tablePath : tablePaths) {
- deleteTableData(tablePath);
- }
- // Delete the data in the database
- try {
- wh.deleteDir(new Path(db.getLocationUri()), true);
- } catch (Exception e) {
- LOG.error("Failed to delete database directory: " + db.getLocationUri() +
- " " + e.getMessage());
- }
- // it is not a terrible thing even if the data is not deleted
- }
-
- if (!listeners.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners,
- EventType.DROP_DATABASE,
- new DropDatabaseEvent(db, success, this),
- null,
- transactionalListenerResponses, ms);
- }
- }
- }
-
- /**
- * Returns a BEST GUESS as to whether or not other is a subdirectory of parent. It does not
- * take into account any intricacies of the underlying file system, which is assumed to be
- * HDFS. This should not return any false positives, but may return false negatives.
- *
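- * For example, "/wh/db/tbl" counts as a subdirectory of "/wh/db", while "/wh/db2" does
- * not, since a trailing separator is appended to parent before the prefix comparison.
- *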
- * @param parent
- * @param other
- * @return
- */
- private boolean isSubdirectory(Path parent, Path other) {
- return other.toString().startsWith(parent.toString().endsWith(Path.SEPARATOR) ?
- parent.toString() : parent.toString() + Path.SEPARATOR);
- }
-
- @Override
- public void drop_database(final String dbName, final boolean deleteData, final boolean cascade)
- throws NoSuchObjectException, InvalidOperationException, MetaException {
-
- startFunction("drop_database", ": " + dbName);
- if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) {
- endFunction("drop_database", false, null);
- throw new MetaException("Can not drop default database");
- }
-
- boolean success = false;
- Exception ex = null;
- try {
- drop_database_core(getMS(), dbName, deleteData, cascade);
- success = true;
- } catch (IOException e) {
- ex = e;
- throw new MetaException(e.getMessage());
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidOperationException) {
- throw (InvalidOperationException) e;
- } else if (e instanceof NoSuchObjectException) {
- throw (NoSuchObjectException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- endFunction("drop_database", success, ex);
- }
- }
-
- @Override
- public List<String> get_databases(final String pattern) throws MetaException {
- startFunction("get_databases", ": " + pattern);
-
- List<String> ret = null;
- Exception ex = null;
- try {
- ret = getMS().getDatabases(pattern);
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- endFunction("get_databases", ret != null, ex);
- }
- return ret;
- }
-
- @Override
- public List<String> get_all_databases() throws MetaException {
- startFunction("get_all_databases");
-
- List<String> ret = null;
- Exception ex = null;
- try {
- ret = getMS().getAllDatabases();
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- endFunction("get_all_databases", ret != null, ex);
- }
- return ret;
- }
-
- private void create_type_core(final RawStore ms, final Type type)
- throws AlreadyExistsException, MetaException, InvalidObjectException {
- if (!MetaStoreUtils.validateName(type.getName(), null)) {
- throw new InvalidObjectException("Invalid type name");
- }
-
- boolean success = false;
- try {
- ms.openTransaction();
- if (is_type_exists(ms, type.getName())) {
- throw new AlreadyExistsException("Type " + type.getName() + " already exists");
- }
- ms.createType(type);
- success = ms.commitTransaction();
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- }
- }
- }
-
- @Override
- public boolean create_type(final Type type) throws AlreadyExistsException,
- MetaException, InvalidObjectException {
- startFunction("create_type", ": " + type.toString());
- boolean success = false;
- Exception ex = null;
- try {
- create_type_core(getMS(), type);
- success = true;
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else if (e instanceof AlreadyExistsException) {
- throw (AlreadyExistsException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- endFunction("create_type", success, ex);
- }
-
- return success;
- }
-
- @Override
- public Type get_type(final String name) throws MetaException, NoSuchObjectException {
- startFunction("get_type", ": " + name);
-
- Type ret = null;
- Exception ex = null;
- try {
- ret = getMS().getType(name);
- if (null == ret) {
- throw new NoSuchObjectException("Type \"" + name + "\" not found.");
- }
- } catch (Exception e) {
- ex = e;
- throwMetaException(e);
- } finally {
- endFunction("get_type", ret != null, ex);
- }
- return ret;
- }
-
- private boolean is_type_exists(RawStore ms, String typeName)
- throws MetaException {
- return (ms.getType(typeName) != null);
- }
-
- @Override
- public boolean drop_type(final String name) throws MetaException, NoSuchObjectException {
- startFunction("drop_type", ": " + name);
-
- boolean success = false;
- Exception ex = null;
- try {
- // TODO:pc validate that there are no types that refer to this
- success = getMS().dropType(name);
- } catch (Exception e) {
- ex = e;
- throwMetaException(e);
- } finally {
- endFunction("drop_type", success, ex);
- }
- return success;
- }
-
- @Override
- public Map<String, Type> get_type_all(String name) throws MetaException {
- // TODO Auto-generated method stub
- startFunction("get_type_all", ": " + name);
- endFunction("get_type_all", false, null);
- throw new MetaException("Not yet implemented");
- }
-
- private void create_table_core(final RawStore ms, final Table tbl,
- final EnvironmentContext envContext)
- throws AlreadyExistsException, MetaException,
- InvalidObjectException, NoSuchObjectException {
- create_table_core(ms, tbl, envContext, null, null, null, null);
- }
-
- private void create_table_core(final RawStore ms, final Table tbl,
- final EnvironmentContext envContext, List<SQLPrimaryKey> primaryKeys,
- List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
- List<SQLNotNullConstraint> notNullConstraints)
- throws AlreadyExistsException, MetaException,
- InvalidObjectException, NoSuchObjectException {
- if (!MetaStoreUtils.validateName(tbl.getTableName(), hiveConf)) {
- throw new InvalidObjectException(tbl.getTableName()
- + " is not a valid object name");
- }
- String validate = MetaStoreUtils.validateTblColumns(tbl.getSd().getCols());
- if (validate != null) {
- throw new InvalidObjectException("Invalid column " + validate);
- }
- if (tbl.getPartitionKeys() != null) {
- validate = MetaStoreUtils.validateTblColumns(tbl.getPartitionKeys());
- if (validate != null) {
- throw new InvalidObjectException("Invalid partition column " + validate);
- }
- }
- SkewedInfo skew = tbl.getSd().getSkewedInfo();
- if (skew != null) {
- validate = MetaStoreUtils.validateSkewedColNames(skew.getSkewedColNames());
- if (validate != null) {
- throw new InvalidObjectException("Invalid skew column " + validate);
- }
- validate = MetaStoreUtils.validateSkewedColNamesSubsetCol(
- skew.getSkewedColNames(), tbl.getSd().getCols());
- if (validate != null) {
- throw new InvalidObjectException("Invalid skew column " + validate);
- }
- }
-
- Map<String, String> transactionalListenerResponses = Collections.emptyMap();
- Path tblPath = null;
- boolean success = false, madeDir = false;
- try {
- firePreEvent(new PreCreateTableEvent(tbl, this));
-
- ms.openTransaction();
-
- Database db = ms.getDatabase(tbl.getDbName());
- if (db == null) {
- throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist");
- }
-
- // get_table checks whether the database exists; that check should be moved here
- if (is_table_exists(ms, tbl.getDbName(), tbl.getTableName())) {
- throw new AlreadyExistsException("Table " + tbl.getTableName()
- + " already exists");
- }
-
- if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
- if (tbl.getSd().getLocation() == null
- || tbl.getSd().getLocation().isEmpty()) {
- tblPath = wh.getDefaultTablePath(
- ms.getDatabase(tbl.getDbName()), tbl.getTableName());
- } else {
- if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
- LOG.warn("Location: " + tbl.getSd().getLocation()
- + " specified for non-external table:" + tbl.getTableName());
- }
- tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
- }
- tbl.getSd().setLocation(tblPath.toString());
- }
-
- if (tblPath != null) {
- if (!wh.isDir(tblPath)) {
- if (!wh.mkdirs(tblPath)) {
- throw new MetaException(tblPath
- + " is not a directory or unable to create one");
- }
- madeDir = true;
- }
- }
- if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) &&
- !MetaStoreUtils.isView(tbl)) {
- MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, envContext);
- }
-
- // set create time
- long time = System.currentTimeMillis() / 1000;
- tbl.setCreateTime((int) time);
- if (tbl.getParameters() == null ||
- tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
- tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
- }
- if (primaryKeys == null && foreignKeys == null
- && uniqueConstraints == null && notNullConstraints == null) {
- ms.createTable(tbl);
- } else {
- // Set constraint name if null before sending to listener
- List<String> constraintNames = ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys,
- uniqueConstraints, notNullConstraints);
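- // constraintNames returns generated names ordered as primary keys, then foreign keys,
- // then unique, then not-null constraints; the index arithmetic below relies on that order.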
- int primaryKeySize = 0;
- if (primaryKeys != null) {
- primaryKeySize = primaryKeys.size();
- for (int i = 0; i < primaryKeys.size(); i++) {
- if (primaryKeys.get(i).getPk_name() == null) {
- primaryKeys.get(i).setPk_name(constraintNames.get(i));
- }
- }
- }
- int foreignKeySize = 0;
- if (foreignKeys != null) {
- foreignKeySize = foreignKeys.size();
- for (int i = 0; i < foreignKeySize; i++) {
- if (foreignKeys.get(i).getFk_name() == null) {
- foreignKeys.get(i).setFk_name(constraintNames.get(primaryKeySize + i));
- }
- }
- }
- int uniqueConstraintSize = 0;
- if (uniqueConstraints != null) {
- uniqueConstraintSize = uniqueConstraints.size();
- for (int i = 0; i < uniqueConstraintSize; i++) {
- if (uniqueConstraints.get(i).getUk_name() == null) {
- uniqueConstraints.get(i).setUk_name(constraintNames.get(primaryKeySize + foreignKeySize + i));
- }
- }
- }
- if (notNullConstraints != null) {
- for (int i = 0; i < notNullConstraints.size(); i++) {
- if (notNullConstraints.get(i).getNn_name() == null) {
- notNullConstraints.get(i).setNn_name(constraintNames.get(primaryKeySize + foreignKeySize + uniqueConstraintSize + i));
- }
- }
- }
- }
-
- if (!transactionalListeners.isEmpty()) {
- transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext);
- if (primaryKeys != null && !primaryKeys.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY,
- new AddPrimaryKeyEvent(primaryKeys, true, this), envContext);
- }
- if (foreignKeys != null && !foreignKeys.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_FOREIGNKEY,
- new AddForeignKeyEvent(foreignKeys, true, this), envContext);
- }
- if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_UNIQUECONSTRAINT,
- new AddUniqueConstraintEvent(uniqueConstraints, true, this), envContext);
- }
- if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_NOTNULLCONSTRAINT,
- new AddNotNullConstraintEvent(notNullConstraints, true, this), envContext);
- }
- }
-
- success = ms.commitTransaction();
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- if (madeDir) {
- wh.deleteDir(tblPath, true);
- }
- }
-
- if (!listeners.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE,
- new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms);
- if (primaryKeys != null && !primaryKeys.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY,
- new AddPrimaryKeyEvent(primaryKeys, success, this), envContext);
- }
- if (foreignKeys != null && !foreignKeys.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_FOREIGNKEY,
- new AddForeignKeyEvent(foreignKeys, success, this), envContext);
- }
- if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_UNIQUECONSTRAINT,
- new AddUniqueConstraintEvent(uniqueConstraints, success, this), envContext);
- }
- if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_NOTNULLCONSTRAINT,
- new AddNotNullConstraintEvent(notNullConstraints, success, this), envContext);
- }
- }
- }
- }
-
- @Override
- public void create_table(final Table tbl) throws AlreadyExistsException,
- MetaException, InvalidObjectException {
- create_table_with_environment_context(tbl, null);
- }
-
- @Override
- public void create_table_with_environment_context(final Table tbl,
- final EnvironmentContext envContext)
- throws AlreadyExistsException, MetaException, InvalidObjectException {
- startFunction("create_table", ": " + tbl.toString());
- boolean success = false;
- Exception ex = null;
- try {
- create_table_core(getMS(), tbl, envContext);
- success = true;
- } catch (NoSuchObjectException e) {
- ex = e;
- throw new InvalidObjectException(e.getMessage());
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else if (e instanceof AlreadyExistsException) {
- throw (AlreadyExistsException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- endFunction("create_table", success, ex, tbl.getTableName());
- }
- }
-
- @Override
- public void create_table_with_constraints(final Table tbl,
- final List<SQLPrimaryKey> primaryKeys, final List<SQLForeignKey> foreignKeys,
- List<SQLUniqueConstraint> uniqueConstraints,
- List<SQLNotNullConstraint> notNullConstraints)
- throws AlreadyExistsException, MetaException, InvalidObjectException {
- startFunction("create_table", ": " + tbl.toString());
- boolean success = false;
- Exception ex = null;
- try {
- create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys,
- uniqueConstraints, notNullConstraints);
- success = true;
- } catch (NoSuchObjectException e) {
- ex = e;
- throw new InvalidObjectException(e.getMessage());
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else if (e instanceof AlreadyExistsException) {
- throw (AlreadyExistsException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- endFunction("create_table", success, ex, tbl.getTableName());
- }
- }
-
- @Override
- public void drop_constraint(DropConstraintRequest req)
- throws MetaException, InvalidObjectException {
- String dbName = req.getDbname();
- String tableName = req.getTablename();
- String constraintName = req.getConstraintname();
- startFunction("drop_constraint", ": " + constraintName.toString());
- boolean success = false;
- Exception ex = null;
- RawStore ms = getMS();
- try {
- ms.openTransaction();
- ms.dropConstraint(dbName, tableName, constraintName);
- if (transactionalListeners.size() > 0) {
- DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName,
- tableName, constraintName, true, this);
- for (MetaStoreEventListener transactionalListener : transactionalListeners) {
- transactionalListener.onDropConstraint(dropConstraintEvent);
- }
- }
- success = ms.commitTransaction();
- } catch (NoSuchObjectException e) {
- ex = e;
- throw new InvalidObjectException(e.getMessage());
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- } else {
- for (MetaStoreEventListener listener : listeners) {
- DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName,
- tableName, constraintName, true, this);
- listener.onDropConstraint(dropConstraintEvent);
- }
- }
- endFunction("drop_constraint", success, ex, constraintName);
- }
- }
-
- @Override
- public void add_primary_key(AddPrimaryKeyRequest req)
- throws MetaException, InvalidObjectException {
- List<SQLPrimaryKey> primaryKeyCols = req.getPrimaryKeyCols();
- String constraintName = (primaryKeyCols != null && primaryKeyCols.size() > 0) ?
- primaryKeyCols.get(0).getPk_name() : "null";
- startFunction("add_primary_key", ": " + constraintName);
- boolean success = false;
- Exception ex = null;
- RawStore ms = getMS();
- try {
- ms.openTransaction();
- List<String> constraintNames = ms.addPrimaryKeys(primaryKeyCols);
- // Set primary key name if null before sending to listener
- if (primaryKeyCols != null) {
- for (int i = 0; i < primaryKeyCols.size(); i++) {
- if (primaryKeyCols.get(i).getPk_name() == null) {
- primaryKeyCols.get(i).setPk_name(constraintNames.get(i));
- }
- }
- }
- if (transactionalListeners.size() > 0) {
- if (primaryKeyCols != null && primaryKeyCols.size() > 0) {
- AddPrimaryKeyEvent addPrimaryKeyEvent = new AddPrimaryKeyEvent(primaryKeyCols, true, this);
- for (MetaStoreEventListener transactionalListener : transactionalListeners) {
- transactionalListener.onAddPrimaryKey(addPrimaryKeyEvent);
- }
- }
- }
- success = ms.commitTransaction();
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- } else if (primaryKeyCols != null && primaryKeyCols.size() > 0) {
- for (MetaStoreEventListener listener : listeners) {
- AddPrimaryKeyEvent addPrimaryKeyEvent = new AddPrimaryKeyEvent(primaryKeyCols, true, this);
- listener.onAddPrimaryKey(addPrimaryKeyEvent);
- }
- }
- endFunction("add_primary_key", success, ex, constraintName);
- }
- }
-
- @Override
- public void add_foreign_key(AddForeignKeyRequest req)
- throws MetaException, InvalidObjectException {
- List<SQLForeignKey> foreignKeyCols = req.getForeignKeyCols();
- String constraintName = (foreignKeyCols != null && foreignKeyCols.size() > 0) ?
- foreignKeyCols.get(0).getFk_name() : "null";
- startFunction("add_foreign_key", ": " + constraintName);
- boolean success = false;
- Exception ex = null;
- RawStore ms = getMS();
- try {
- ms.openTransaction();
- List<String> constraintNames = ms.addForeignKeys(foreignKeyCols);
- // Set foreign key name if null before sending to listener
- if (foreignKeyCols != null) {
- for (int i = 0; i < foreignKeyCols.size(); i++) {
- if (foreignKeyCols.get(i).getFk_name() == null) {
- foreignKeyCols.get(i).setFk_name(constraintNames.get(i));
- }
- }
- }
- if (transactionalListeners.size() > 0) {
- if (foreignKeyCols != null && foreignKeyCols.size() > 0) {
- AddForeignKeyEvent addForeignKeyEvent = new AddForeignKeyEvent(foreignKeyCols, true, this);
- for (MetaStoreEventListener transactionalListener : transactionalListeners) {
- transactionalListener.onAddForeignKey(addForeignKeyEvent);
- }
- }
- }
- success = ms.commitTransaction();
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- } else if (foreignKeyCols != null && foreignKeyCols.size() > 0) {
- for (MetaStoreEventListener listener : listeners) {
- AddForeignKeyEvent addForeignKeyEvent = new AddForeignKeyEvent(foreignKeyCols, true, this);
- listener.onAddForeignKey(addForeignKeyEvent);
- }
- }
- endFunction("add_foreign_key", success, ex, constraintName);
- }
- }
-
- @Override
- public void add_unique_constraint(AddUniqueConstraintRequest req)
- throws MetaException, InvalidObjectException {
- List<SQLUniqueConstraint> uniqueConstraintCols = req.getUniqueConstraintCols();
- String constraintName = (uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) ?
- uniqueConstraintCols.get(0).getUk_name() : "null";
- startFunction("add_unique_constraint", ": " + constraintName);
- boolean success = false;
- Exception ex = null;
- RawStore ms = getMS();
- try {
- ms.openTransaction();
- List<String> constraintNames = ms.addUniqueConstraints(uniqueConstraintCols);
- // Set unique constraint name if null before sending to listener
- if (uniqueConstraintCols != null) {
- for (int i = 0; i < uniqueConstraintCols.size(); i++) {
- if (uniqueConstraintCols.get(i).getUk_name() == null) {
- uniqueConstraintCols.get(i).setUk_name(constraintNames.get(i));
- }
- }
- }
- if (transactionalListeners.size() > 0) {
- if (uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) {
- AddUniqueConstraintEvent addUniqueConstraintEvent = new AddUniqueConstraintEvent(uniqueConstraintCols, true, this);
- for (MetaStoreEventListener transactionalListener : transactionalListeners) {
- transactionalListener.onAddUniqueConstraint(addUniqueConstraintEvent);
- }
- }
- }
- success = ms.commitTransaction();
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- } else if (uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) {
- for (MetaStoreEventListener listener : listeners) {
- AddUniqueConstraintEvent addUniqueConstraintEvent = new AddUniqueConstraintEvent(uniqueConstraintCols, true, this);
- listener.onAddUniqueConstraint(addUniqueConstraintEvent);
- }
- }
- endFunction("add_unique_constraint", success, ex, constraintName);
- }
- }
-
- @Override
- public void add_not_null_constraint(AddNotNullConstraintRequest req)
- throws MetaException, InvalidObjectException {
- List<SQLNotNullConstraint> notNullConstraintCols = req.getNotNullConstraintCols();
- String constraintName = (notNullConstraintCols != null && notNullConstraintCols.size() > 0) ?
- notNullConstraintCols.get(0).getNn_name() : "null";
- startFunction("add_not_null_constraint", ": " + constraintName);
- boolean success = false;
- Exception ex = null;
- RawStore ms = getMS();
- try {
- ms.openTransaction();
- List<String> constraintNames = ms.addNotNullConstraints(notNullConstraintCols);
- // Set not null constraint name if null before sending to listener
- if (notNullConstraintCols != null) {
- for (int i = 0; i < notNullConstraintCols.size(); i++) {
- if (notNullConstraintCols.get(i).getNn_name() == null) {
- notNullConstraintCols.get(i).setNn_name(constraintNames.get(i));
- }
- }
- }
- if (transactionalListeners.size() > 0) {
- if (notNullConstraintCols != null && notNullConstraintCols.size() > 0) {
- AddNotNullConstraintEvent addNotNullConstraintEvent = new AddNotNullConstraintEvent(notNullConstraintCols, true, this);
- for (MetaStoreEventListener transactionalListener : transactionalListeners) {
- transactionalListener.onAddNotNullConstraint(addNotNullConstraintEvent);
- }
- }
- }
- success = ms.commitTransaction();
- } catch (Exception e) {
- ex = e;
- if (e instanceof MetaException) {
- throw (MetaException) e;
- } else if (e instanceof InvalidObjectException) {
- throw (InvalidObjectException) e;
- } else {
- throw newMetaException(e);
- }
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- } else if (notNullConstraintCols != null && notNullConstraintCols.size() > 0) {
- for (MetaStoreEventListener listener : listeners) {
- AddNotNullConstraintEvent addNotNullConstraintEvent = new AddNotNullConstraintEvent(notNullConstraintCols, true, this);
- listener.onAddNotNullConstraint(addNotNullConstraintEvent);
- }
- }
- endFunction("add_not_null_constraint", success, ex, constraintName);
- }
- }
-
- private boolean is_table_exists(RawStore ms, String dbname, String name)
- throws MetaException {
- return (ms.getTable(dbname, name) != null);
- }
-
- private boolean drop_table_core(final RawStore ms, final String dbname, final String name,
- final boolean deleteData, final EnvironmentContext envContext,
- final String indexName) throws NoSuchObjectException,
- MetaException, IOException, InvalidObjectException, InvalidInputException {
- boolean success = false;
- boolean isExternal = false;
- Path tblPath = null;
- List<Path> partPaths = null;
- Table tbl = null;
- boolean ifPurge = false;
- Map<String, String> transactionalListenerResponses = Collections.emptyMap();
- try {
- ms.openTransaction();
- // drop any partitions
- tbl = get_table_core(dbname, name);
- if (tbl == null) {
- throw new NoSuchObjectException(name + " doesn't exist");
- }
- if (tbl.getSd() == null) {
- throw new MetaException("Table metadata is corrupted");
- }
- ifPurge = isMustPurge(envContext, tbl);
-
- firePreEvent(new PreDropTableEvent(tbl, deleteData, this));
-
- boolean isIndexTable = isIndexTable(tbl);
- if (indexName == null && isIndexTable) {
- throw new RuntimeException(
- "The table " + name + " is an index table. Please do drop index instead.");
- }
-
- if (!isIndexTable) {
- try {
- List<Index> indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE);
- while (indexes != null && indexes.size() > 0) {
- for (Index idx : indexes) {
- this.drop_index_by_name(dbname, name, idx.getIndexName(), true);
- }
- indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE);
- }
- } catch (TException e) {
- throw new MetaException(e.getMessage());
- }
- }
- isExternal = isExternal(tbl);
- if (tbl.getSd().getLocation() != null) {
- tblPath = new Path(tbl.getSd().getLocation());
- if (!wh.isWritable(tblPath.getParent())) {
- String target = indexName == null ? "Table" : "Index table";
- throw new MetaException(target + " metadata not deleted since " +
- tblPath.getParent() + " is not writable by " +
- hiveConf.getUser());
- }
- }
-
- // Drop the partitions and get a list of locations which need to be deleted
- partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath,
- tbl.getPartitionKeys(), deleteData && !isExternal);
- if (!ms.dropTable(dbname, name)) {
- String tableName = dbname + "." + name;
- throw new MetaException(indexName == null ? "Unable to drop table " + tableName:
- "Unable to drop index table " + tableName + " for index " + indexName);
- } else {
- if (!transactionalListeners.isEmpty()) {
- transactionalListenerResponses =
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventType.DROP_TABLE,
- new DropTableEvent(tbl, true, deleteData, this),
- envContext);
- }
- success = ms.commitTransaction();
- }
- } finally {
- if (!success) {
- ms.rollbackTransaction();
- } else if (deleteData && !isExternal) {
- // Data needs deletion. Check if trash may be skipped.
- // Delete the data in the partitions which have other locations
- deletePartitionData(partPaths, ifPurge);
- // Delete the data in the table
- deleteTableData(tblPath, ifPurge);
- // ok even if the data is not deleted
- }
-
- if (!listeners.isEmpty()) {
- MetaStoreListenerNotifier.notifyEvent(listeners,
- EventType.DROP_TABLE,
- new DropTableEvent(tbl, success, deleteData, this),
- envContext,
- transactionalListenerResponses, ms);
- }
- }
- return success;
- }
-
- /**
- * Deletes the data in a table's location; if the delete fails, an error is logged.
- *
- * @param tablePath
- */
- private void deleteTableData(Path tablePath) {
- deleteTableData(tablePath, false);
- }
-
- /**
- * Deletes the data in a table's location; if the delete fails, an error is logged.
- *
- * @param tablePath
- * @param ifPurge completely purge the table (skipping trash) while removing
- * data from warehouse
- */
- private void deleteTableData(Path tablePath, boolean ifPurge) {
-
- if (tablePath != null) {
- try {
- wh.deleteDir(tablePath, true, ifPurge);
- } catch (Exception e) {
- LOG.error("Failed to delete table directory: " + tablePath +
- " " + e.getMessage());
- }
- }
- }
-
- /**
- * Given a list of partition locations, tries to delete each one,
- * logging an error for each delete that fails.
- *
- * @param partPaths
- */
- private void deletePartitionData(List<Path> partPaths) {
- deletePartitionData(partPaths, false);
- }
-
- /**
- * Given a list of partition locations, tries to delete each one,
- * logging an error for each delete that fails.
- *
- * @param partPaths
- * @param ifPurge completely purge the partition (skipping trash) while
- * removing data from warehouse
- */
- private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
- if (partPaths != null && !partPaths.isEmpty()) {
- for (Path partPath : partPaths) {
- try {
- wh.deleteDir(partPath, true, ifPurge);
- } catch (Exception e) {
- LOG.error("Failed to delete partition directory: " + partPath +
- " " + e.getMessage());
- }
- }
- }
- }
-
- /**
- * Retrieves the partitions specified by partitionKeys. If checkLocation is true, verifies
- * that the locations of partitions which may not be subdirectories of tablePath are
- * writable.
- *
- * Drops the metadata for each partition.
- *
- * Provides a list of locations of partitions which may not be subdirectories of tablePath.
- *
- * @param ms
- * @param dbName
- * @param tableName
- * @param tablePath
- * @param partitionKeys
- * @param checkLocation
- * @return
- * @throws MetaException
- * @throws IOException
- * @throws InvalidInputException
- * @throws InvalidObjectException
- * @throws NoSuchObjectException
- */
- private List<Path> dropPartitionsAndGetLocations(RawStore ms, String dbName,
- String tableName, Path tablePath, List<FieldSchema> partitionKeys, boolean checkLocation)
- throws MetaException, IOException, NoSuchObjectException, InvalidObjectException,
- InvalidInputException {
- int partitionBatchSize = HiveConf.getIntVar(hiveConf,
- ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
- Path tableDnsPath = null;
- if (tablePath != null) {
- tableDnsPath = wh.getDnsPath(tablePath);
- }
- List<Path> partPaths = new ArrayList<Path>();
- Table tbl = ms.getTable(dbName, tableName);
-
- // call dropPartition on each of the table's partitions to follow the
- // procedure for cleanly dropping partitions.
- while (true) {
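- // Fetch and drop partitions in batches (bounded by METASTORE_BATCH_RETRIEVE_MAX)
- // to limit memory use.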
- List<Partition> partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize);
- if (partsToDelete == null || partsToDelete.isEmpty()) {
- break;
- }
- List<String> partNames = new ArrayList<String>();
- for (Partition part : partsToDelete) {
- if (checkLocation && part.getSd() != null &&
- part.getSd().getLocation() != null) {
-
- Path partPath = wh.getDnsPath(new Path(part.getSd().getLocation()));
- if (tableDnsPath == null ||
- (partPath != null && !isSubdirectory(tableDnsPath, partPath))) {
- if (!wh.isWritable(partPath.getParent())) {
- throw new MetaException("Table metadata not deleted since the partition " +
- Warehouse.makePartName(partitionKeys, part.getValues()) +
- " has parent location " + partPath.getParent() + " which is not writable " +
- "by " + hiveConf.getUser());
- }
- partPaths.add(partPath);
- }
- }
- partNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
- }
- for (MetaStoreEventListener listener : listeners) {
- //No drop part listener events fired for public listeners historically, for drop table case.
- //Limiting to internal listeners for now, to avoid unexpected calls for public listeners.
- if (listener instanceof HMSMetricsListener) {
- for (@SuppressWarnings("unused") Partition part : partsToDelete) {
- listener.onDropPartition(null);
- }
- }
- }
- ms.dropPartitions(dbName, tableName, partNames);
- }
-
- return partPaths;
- }
-
- @Override
- public void drop_table(final String dbname, final String name, final boolean deleteData)
- throws NoSuchObjectException, MetaException {
- drop_table_with_environment_context(dbname, name, deleteData, null);
- }
-
- @Override
- public void drop_table_with_environment_context(final String dbname, final String name,
- final boolean deleteData, final EnvironmentContext envContext)
- throws NoSuchObjectException, MetaException {
- startTableFunction("drop_table", dbname, name);
-
- boolean success = false;
- Exception ex = null;
- try {
- success = drop_table_core(getMS(), dbname, name, deleteData, envContext, null);
- } catch (IOException e) {
- ex = e;
- throw new MetaException(e.getMessage());
- } catch (Exception e) {
- ex = e;
- throwMetaException(e);
- } finally {
- endFunction("drop_table", success, ex, name);
- }
-
- }
-
- private void updateStatsForTruncate(Map<String,String> props, EnvironmentContext environmentContext) {
- if (null == props) {
- return;
- }
- for (String stat : StatsSetupConst.supportedStats) {
- String statVal = props.get(stat);
- if (statVal != null) {
- //In the case of truncate table, we set the stats to be 0.
- props.put(stat, "0");
- }
- }
- //first set basic stats to true
- StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE);
- environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
- //then invalidate column stats
- StatsSetupConst.clearColumnStatsState(props);
- return;
- }
-
- private void alterPartitionForTruncate(final RawStore ms,
- final String dbName,
- final String tableName,
- final Table table,
-
<TRUNCATED>
[3/7] hive git commit: HIVE-17967 Move HiveMetaStore class. This
closes #270 (Alan Gates, reviewed by Thejas Nair).
Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
new file mode 100644
index 0000000..7a636aa
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -0,0 +1,8037 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
+
+import javax.jdo.JDOException;
+
+import com.codahale.metrics.Counter;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimaps;
+
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
+import org.apache.hadoop.hive.metastore.cache.CachedStore;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
+import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.InsertEvent;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;
+import org.apache.hadoop.hive.metastore.utils.FileUtils;
+import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.apache.hadoop.hive.metastore.utils.LogUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.thrift.TException;
+import org.apache.thrift.TProcessor;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.server.ServerContext;
+import org.apache.thrift.server.TServer;
+import org.apache.thrift.server.TServerEventHandler;
+import org.apache.thrift.server.TThreadPoolServer;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TServerSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+import com.facebook.fb303.FacebookBase;
+import com.facebook.fb303.fb_status;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * TODO:pc move application logic to a separate interface.
+ */
+public class HiveMetaStore extends ThriftHiveMetastore {
+ public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStore.class);
+ public static final String PARTITION_NUMBER_EXCEED_LIMIT_MSG =
+ "Number of partitions scanned (=%d) on table '%s' exceeds limit (=%d). This is controlled on the metastore server by %s.";
+
+ // Flag that tells whether the (remote) HiveMetaStore server is being used.
+ // Can be used to determine whether calls to the metastore API (HMSHandler) are
+ // being made against an embedded metastore or a remote one.
+ private static boolean isMetaStoreRemote = false;
+
+ // Used for testing to simulate method timeout.
+ @VisibleForTesting
+ static boolean TEST_TIMEOUT_ENABLED = false;
+ @VisibleForTesting
+ static long TEST_TIMEOUT_VALUE = -1;
+
+ private static ShutdownHookManager shutdownHookMgr;
+
+ public static final String ADMIN = "admin";
+ public static final String PUBLIC = "public";
+ /** MM write states. */
+ public static final char MM_WRITE_OPEN = 'o', MM_WRITE_COMMITTED = 'c', MM_WRITE_ABORTED = 'a';
+
+ private static HadoopThriftAuthBridge.Server saslServer;
+ private static MetastoreDelegationTokenManager delegationTokenManager;
+ private static boolean useSasl;
+
+ static final String NO_FILTER_STRING = "";
+ static final int UNLIMITED_MAX_PARTITIONS = -1;
+
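+ // Composes two Thrift transport factories: the child factory wraps the transport
+ // produced by the parent, so that, for example, a framed transport can be layered
+ // over a SASL transport.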
+ private static final class ChainedTTransportFactory extends TTransportFactory {
+ private final TTransportFactory parentTransFactory;
+ private final TTransportFactory childTransFactory;
+
+ private ChainedTTransportFactory(
+ TTransportFactory parentTransFactory,
+ TTransportFactory childTransFactory) {
+ this.parentTransFactory = parentTransFactory;
+ this.childTransFactory = childTransFactory;
+ }
+
+ @Override
+ public TTransport getTransport(TTransport trans) {
+ return childTransFactory.getTransport(parentTransFactory.getTransport(trans));
+ }
+ }
+
+ public static class HMSHandler extends FacebookBase implements IHMSHandler {
+ public static final Logger LOG = HiveMetaStore.LOG;
+ private final Configuration conf; // stores datastore (jpox) properties,
+ // right now they come from jpox.properties
+
+ private static String currentUrl;
+ private FileMetadataManager fileMetadataManager;
+ private PartitionExpressionProxy expressionProxy;
+ private StorageSchemaReader storageSchemaReader;
+
+ // Variables for metrics
+ // Package visible so that HMSMetricsListener can see them.
+ static AtomicInteger databaseCount, tableCount, partCount;
+
+ private Warehouse wh; // hdfs warehouse
+ private static final ThreadLocal<RawStore> threadLocalMS =
+ new ThreadLocal<RawStore>() {
+ @Override
+ protected RawStore initialValue() {
+ return null;
+ }
+ };
+
+ private static final ThreadLocal<TxnStore> threadLocalTxn = new ThreadLocal<TxnStore>() {
+ @Override
+ protected TxnStore initialValue() {
+ return null;
+ }
+ };
+
+ private static final ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>> timerContexts =
+ new ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>>() {
+ @Override
+ protected Map<String, com.codahale.metrics.Timer.Context> initialValue() {
+ return new HashMap<>();
+ }
+ };
+
+ public static RawStore getRawStore() {
+ return threadLocalMS.get();
+ }
+
+ static void removeRawStore() {
+ threadLocalMS.remove();
+ }
+
+ // Thread local configuration is needed as many threads could make changes
+ // to the conf using the connection hook
+ private static final ThreadLocal<Configuration> threadLocalConf =
+ new ThreadLocal<Configuration>() {
+ @Override
+ protected Configuration initialValue() {
+ return null;
+ }
+ };
+
+ /**
+ * Thread local HMSHandler used during shutdown to notify meta listeners
+ */
+ private static final ThreadLocal<HMSHandler> threadLocalHMSHandler = new ThreadLocal<>();
+
+ /**
+ * Thread local Map to keep track of modified meta conf keys
+ */
+ private static final ThreadLocal<Map<String, String>> threadLocalModifiedConfig =
+ new ThreadLocal<>();
+
+ private static ExecutorService threadPool;
+
+ static final Logger auditLog = LoggerFactory.getLogger(
+ HiveMetaStore.class.getName() + ".audit");
+
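+ // Writes one line per metastore call to the audit logger: the caller's UGI, the
+ // client IP (or "unknown-ip-addr" when it cannot be determined), and the command.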
+ private static void logAuditEvent(String cmd) {
+ if (cmd == null) {
+ return;
+ }
+
+ UserGroupInformation ugi;
+ try {
+ ugi = SecurityUtils.getUGI();
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+
+ String address = getIPAddress();
+ if (address == null) {
+ address = "unknown-ip-addr";
+ }
+
+ auditLog.info("ugi={} ip={} cmd={} ", ugi.getUserName(), address, cmd);
+ }
+
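+ // Resolves the client IP: from the SASL server when SASL/kerberos is in use,
+ // otherwise from the thread-local address set by the Thrift server; may be null.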
+ private static String getIPAddress() {
+ if (useSasl) {
+ if (saslServer != null && saslServer.getRemoteAddress() != null) {
+ return saslServer.getRemoteAddress().getHostAddress();
+ }
+ } else {
+ // if kerberos is not enabled
+ return getThreadLocalIpAddress();
+ }
+ return null;
+ }
+
+ private static final AtomicInteger nextSerialNum = new AtomicInteger(0);
+ private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
+ @Override
+ protected Integer initialValue() {
+ // getAndIncrement keeps ids unique even when threads initialize concurrently.
+ return nextSerialNum.getAndIncrement();
+ }
+ };
+
+ // This will only be set if the metastore is being accessed from a metastore Thrift server,
+ // not if it is from the CLI. Also, only if the TTransport being used to connect is an
+ // instance of TSocket. This is also not set when kerberos is used.
+ private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() {
+ @Override
+ protected String initialValue() {
+ return null;
+ }
+ };
+
+ /**
+ * Internal function to notify listeners for meta config change events
+ */
+ private void notifyMetaListeners(String key, String oldValue, String newValue) throws MetaException {
+ for (MetaStoreEventListener listener : listeners) {
+ listener.onConfigChange(new ConfigChangeEvent(this, key, oldValue, newValue));
+ }
+
+ if (transactionalListeners.size() > 0) {
+ // All the fields of this event are final, so no reason to create a new one for each
+ // listener
+ ConfigChangeEvent cce = new ConfigChangeEvent(this, key, oldValue, newValue);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onConfigChange(cce);
+ }
+ }
+ }
+
+ /**
+ * Internal function to notify listeners to revert back to old values of keys
+ * that were modified during setMetaConf. This would get called from HiveMetaStore#cleanupRawStore
+ */
+ private void notifyMetaListenersOnShutDown() {
+ Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+ if (modifiedConf == null) {
+ // Nothing got modified
+ return;
+ }
+ try {
+ Configuration conf = threadLocalConf.get();
+ if (conf == null) {
+ throw new MetaException("Unexpected: modifiedConf is non-null but conf is null");
+ }
+ // Notify listeners of the changed value
+ for (Entry<String, String> entry : modifiedConf.entrySet()) {
+ String key = entry.getKey();
+ // curr value becomes old and vice-versa
+ String currVal = entry.getValue();
+ String oldVal = conf.get(key);
+ if (!Objects.equals(oldVal, currVal)) {
+ notifyMetaListeners(key, oldVal, currVal);
+ }
+ }
+ logInfo("Meta listeners shutdown notification completed.");
+ } catch (MetaException e) {
+ LOG.error("Failed to notify meta listeners on shutdown: ", e);
+ }
+ }
+
+ static void setThreadLocalIpAddress(String ipAddress) {
+ threadLocalIpAddress.set(ipAddress);
+ }
+
+ // This will return null if the metastore is not being accessed from a metastore Thrift server,
+ // or if the TTransport being used to connect is not an instance of TSocket, or if kerberos
+ // is used
+ static String getThreadLocalIpAddress() {
+ return threadLocalIpAddress.get();
+ }
+
+ // Make it possible for tests to check that the right type of PartitionExpressionProxy was
+ // instantiated.
+ @VisibleForTesting
+ PartitionExpressionProxy getExpressionProxy() {
+ return expressionProxy;
+ }
+
+ /**
+ * Use {@link #getThreadId()} instead.
+ * @return thread id
+ */
+ @Deprecated
+ public static Integer get() {
+ return threadLocalId.get();
+ }
+
+ @Override
+ public int getThreadId() {
+ return threadLocalId.get();
+ }
+
+ public HMSHandler(String name) throws MetaException {
+ this(name, MetastoreConf.newMetastoreConf(), true);
+ }
+
+ public HMSHandler(String name, Configuration conf) throws MetaException {
+ this(name, conf, true);
+ }
+
+ public HMSHandler(String name, Configuration conf, boolean init) throws MetaException {
+ super(name);
+ this.conf = conf;
+ isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST);
+ if (threadPool == null) {
+ synchronized (HMSHandler.class) {
+ // Re-check under the lock so concurrent constructors create only one pool.
+ if (threadPool == null) {
+ int numThreads = MetastoreConf.getIntVar(conf, ConfVars.FS_HANDLER_THREADS_COUNT);
+ threadPool = Executors.newFixedThreadPool(numThreads,
+ new ThreadFactoryBuilder().setDaemon(true)
+ .setNameFormat("HMSHandler #%d").build());
+ }
+ }
+ }
+ if (init) {
+ init();
+ }
+ }
+
+ /**
+ * Use {@link #getConf()} instead.
+ * @return Configuration object
+ */
+ @Deprecated
+ public Configuration getHiveConf() {
+ return conf;
+ }
+
+ private ClassLoader classLoader;
+ private AlterHandler alterHandler;
+ private List<MetaStorePreEventListener> preListeners;
+ private List<MetaStoreEventListener> listeners;
+ private List<TransactionalMetaStoreEventListener> transactionalListeners;
+ private List<MetaStoreEndFunctionListener> endFunctionListeners;
+ private List<MetaStoreInitListener> initListeners;
+ private Pattern partitionValidationPattern;
+ private final boolean isInTest;
+
+ {
+ classLoader = Thread.currentThread().getContextClassLoader();
+ if (classLoader == null) {
+ classLoader = Configuration.class.getClassLoader();
+ }
+ }
+
+ @Override
+ public List<TransactionalMetaStoreEventListener> getTransactionalListeners() {
+ return transactionalListeners;
+ }
+
+ @Override
+ public void init() throws MetaException {
+ initListeners = MetaStoreUtils.getMetaStoreListeners(
+ MetaStoreInitListener.class, conf, MetastoreConf.getVar(conf, ConfVars.INIT_HOOKS));
+ for (MetaStoreInitListener singleInitListener: initListeners) {
+ MetaStoreInitContext context = new MetaStoreInitContext();
+ singleInitListener.onInit(context);
+ }
+
+ String alterHandlerName = MetastoreConf.getVar(conf, ConfVars.ALTER_HANDLER);
+ alterHandler = ReflectionUtils.newInstance(JavaUtils.getClass(
+ alterHandlerName, AlterHandler.class), conf);
+ wh = new Warehouse(conf);
+
+ synchronized (HMSHandler.class) {
+ if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(conf))) {
+ createDefaultDB();
+ createDefaultRoles();
+ addAdminUsers();
+ currentUrl = MetaStoreInit.getConnectionURL(conf);
+ }
+ }
+
+ //Start Metrics
+ if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) {
+ LOG.info("Begin calculating metadata count metrics.");
+ Metrics.initialize(conf);
+ databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES);
+ tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES);
+ partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS);
+ updateMetrics();
+
+ }
+
+ preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
+ conf, MetastoreConf.getVar(conf, ConfVars.PRE_EVENT_LISTENERS));
+ preListeners.add(0, new TransactionalValidationListener(conf));
+ listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf,
+ MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS));
+ listeners.add(new SessionPropertiesListener(conf));
+ listeners.add(new AcidEventListener(conf));
+ transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,
+ conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
+ if (Metrics.getRegistry() != null) {
+ listeners.add(new HMSMetricsListener(conf));
+ }
+
+ endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
+ MetaStoreEndFunctionListener.class, conf, MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS));
+
+ String partitionValidationRegex =
+ MetastoreConf.getVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
+ if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+ partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+ } else {
+ partitionValidationPattern = null;
+ }
+
+ ThreadPool.initialize(conf);
+ Collection<String> taskNames =
+ MetastoreConf.getStringCollection(conf, ConfVars.TASK_THREADS_ALWAYS);
+ for (String taskName : taskNames) {
+ MetastoreTaskThread task =
+ JavaUtils.newInstance(JavaUtils.getClass(taskName, MetastoreTaskThread.class));
+ task.setConf(conf);
+ long freq = task.runFrequency(TimeUnit.MILLISECONDS);
+ // For backwards compatibility: some of these threads used to be hard coded,
+ // but ran only if their frequency was > 0.
+ if (freq > 0) {
+ ThreadPool.getPool().scheduleAtFixedRate(task, freq, freq, TimeUnit.MILLISECONDS);
+
+ }
+ }
+ expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+ fileMetadataManager = new FileMetadataManager(this.getMS(), conf);
+ }
+
+ private static String addPrefix(String s) {
+ return threadLocalId.get() + ": " + s;
+ }
+
+ /**
+ * Set copy of invoking HMSHandler on thread local
+ */
+ private static void setHMSHandler(HMSHandler handler) {
+ if (threadLocalHMSHandler.get() == null) {
+ threadLocalHMSHandler.set(handler);
+ }
+ }
+ @Override
+ public void setConf(Configuration conf) {
+ threadLocalConf.set(conf);
+ RawStore ms = threadLocalMS.get();
+ if (ms != null) {
+ ms.setConf(conf); // reload if DS related configuration is changed
+ }
+ }
+
+ @Override
+ public Configuration getConf() {
+ Configuration conf = threadLocalConf.get();
+ if (conf == null) {
+ conf = new Configuration(this.conf);
+ threadLocalConf.set(conf);
+ }
+ return conf;
+ }
+
+ private Map<String, String> getModifiedConf() {
+ Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+ if (modifiedConf == null) {
+ modifiedConf = new HashMap<>();
+ threadLocalModifiedConfig.set(modifiedConf);
+ }
+ return modifiedConf;
+ }
+
+ @Override
+ public Warehouse getWh() {
+ return wh;
+ }
+
+ @Override
+ public void setMetaConf(String key, String value) throws MetaException {
+ ConfVars confVar = MetastoreConf.getMetaConf(key);
+ if (confVar == null) {
+ throw new MetaException("Invalid configuration key " + key);
+ }
+ try {
+ confVar.validate(value);
+ } catch (IllegalArgumentException e) {
+ throw new MetaException("Invalid configuration value " + value + " for key " + key +
+ " by " + e.getMessage());
+ }
+ Configuration configuration = getConf();
+ String oldValue = MetastoreConf.get(configuration, key);
+ // Save prev val of the key on threadLocal
+ Map<String, String> modifiedConf = getModifiedConf();
+ if (!modifiedConf.containsKey(key)) {
+ modifiedConf.put(key, oldValue);
+ }
+ // Set invoking HMSHandler on threadLocal, this will be used later to notify
+ // metaListeners in HiveMetaStore#cleanupRawStore
+ setHMSHandler(this);
+ configuration.set(key, value);
+ notifyMetaListeners(key, oldValue, value);
+ }
+
+ @Override
+ public String getMetaConf(String key) throws MetaException {
+ ConfVars confVar = MetastoreConf.getMetaConf(key);
+ if (confVar == null) {
+ throw new MetaException("Invalid configuration key " + key);
+ }
+ return getConf().get(key, confVar.getDefaultVal().toString());
+ }
+
+ /**
+ * Get a cached RawStore.
+ *
+ * @return the cached RawStore
+ * @throws MetaException
+ */
+ @Override
+ public RawStore getMS() throws MetaException {
+ Configuration conf = getConf();
+ return getMSForConf(conf);
+ }
+
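+ // Returns this thread's cached RawStore, creating one (and verifying the schema)
+ // on first access from the thread.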
+ public static RawStore getMSForConf(Configuration conf) throws MetaException {
+ RawStore ms = threadLocalMS.get();
+ if (ms == null) {
+ ms = newRawStoreForConf(conf);
+ ms.verifySchema();
+ threadLocalMS.set(ms);
+ ms = threadLocalMS.get();
+ }
+ return ms;
+ }
+
+ private TxnStore getTxnHandler() {
+ TxnStore txn = threadLocalTxn.get();
+ if (txn == null) {
+ txn = TxnUtils.getTxnStore(conf);
+ threadLocalTxn.set(txn);
+ }
+ return txn;
+ }
+
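+ // Instantiates the RawStore implementation named by RAW_STORE_IMPL, wrapped in a
+ // RawStoreProxy bound to this thread's id.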
+ private static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
+ Configuration newConf = new Configuration(conf);
+ String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL);
+ LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName));
+ return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get());
+ }
+
+ private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
+ try {
+ ms.getDatabase(DEFAULT_DATABASE_NAME);
+ } catch (NoSuchObjectException e) {
+ Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
+ wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null);
+ db.setOwnerName(PUBLIC);
+ db.setOwnerType(PrincipalType.ROLE);
+ ms.createDatabase(db);
+ }
+ }
+
+ /**
+ * create default database if it doesn't exist.
+ *
+ * There is potential contention when a HiveServer2 with an embedded metastore and a
+ * Metastore Server invoke createDefaultDB concurrently. If one invocation fails, the
+ * JDOException is caught and the call is retried once; if it fails again it is simply
+ * ignored with a warning, which means the other invocation succeeded.
+ *
+ * @throws MetaException
+ */
+ private void createDefaultDB() throws MetaException {
+ try {
+ createDefaultDB_core(getMS());
+ } catch (JDOException e) {
+ LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
+ try {
+ createDefaultDB_core(getMS());
+ } catch (InvalidObjectException e1) {
+ throw new MetaException(e1.getMessage());
+ }
+ } catch (InvalidObjectException e) {
+ throw new MetaException(e.getMessage());
+ }
+ }
+
+ /**
+ * create default roles if they don't exist.
+ *
+ * There is potential contention when a HiveServer2 with an embedded metastore and a
+ * Metastore Server invoke createDefaultRoles concurrently. If one invocation fails,
+ * the JDOException is caught and the call is retried once; if it fails again it is
+ * simply ignored with a warning, which means the other invocation succeeded.
+ *
+ * @throws MetaException
+ */
+ private void createDefaultRoles() throws MetaException {
+ try {
+ createDefaultRoles_core();
+ } catch (JDOException e) {
+ LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e);
+ createDefaultRoles_core();
+ }
+ }
+
+ private void createDefaultRoles_core() throws MetaException {
+
+ RawStore ms = getMS();
+ try {
+ ms.addRole(ADMIN, ADMIN);
+ } catch (InvalidObjectException e) {
+ LOG.debug(ADMIN +" role already exists",e);
+ } catch (NoSuchObjectException e) {
+ // This should never be thrown.
+ LOG.warn("Unexpected exception while adding " +ADMIN+" roles" , e);
+ }
+ LOG.info("Added "+ ADMIN+ " role in metastore");
+ try {
+ ms.addRole(PUBLIC, PUBLIC);
+ } catch (InvalidObjectException e) {
+ LOG.debug(PUBLIC + " role already exists",e);
+ } catch (NoSuchObjectException e) {
+ // This should never be thrown.
+ LOG.warn("Unexpected exception while adding "+PUBLIC +" roles" , e);
+ }
+ LOG.info("Added "+PUBLIC+ " role in metastore");
+ // now grant all privs to admin
+ PrivilegeBag privs = new PrivilegeBag();
+ privs.addToPrivileges(new HiveObjectPrivilege( new HiveObjectRef(HiveObjectType.GLOBAL, null,
+ null, null, null), ADMIN, PrincipalType.ROLE, new PrivilegeGrantInfo("All", 0, ADMIN,
+ PrincipalType.ROLE, true)));
+ try {
+ ms.grantPrivileges(privs);
+ } catch (InvalidObjectException e) {
+ // Surprisingly these privs are already granted.
+ LOG.debug("Failed while granting global privs to admin", e);
+ } catch (NoSuchObjectException e) {
+ // Unlikely to be thrown.
+ LOG.warn("Failed while granting global privs to admin", e);
+ }
+ }
+
+ /**
+ * add admin users if they don't exist.
+ *
+ * There is potential contention when a HiveServer2 with an embedded metastore and a
+ * Metastore Server invoke addAdminUsers concurrently. If one invocation fails, the
+ * JDOException is caught and the call is retried once; if it fails again it is simply
+ * ignored with a warning, which means the other invocation succeeded.
+ *
+ * @throws MetaException
+ */
+ private void addAdminUsers() throws MetaException {
+ try {
+ addAdminUsers_core();
+ } catch (JDOException e) {
+ LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e);
+ addAdminUsers_core();
+ }
+ }
+
+ private void addAdminUsers_core() throws MetaException {
+
+ // now add pre-configured users to admin role
+ String userStr = MetastoreConf.getVar(conf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
+ if (userStr.isEmpty()) {
+ LOG.info("No user is added in admin role, since config is empty");
+ return;
+ }
+ // Since user names must be valid unix user names, per IEEE Std 1003.1-2001 they cannot
+ // contain a comma, so we can safely split the string above on commas.
+
+ Iterator<String> users = Splitter.on(",").trimResults().omitEmptyStrings().split(userStr).iterator();
+ if (!users.hasNext()) {
+ LOG.info("No user is added in admin role, since config value "+ userStr +
+ " is in incorrect format. We accept comma separated list of users.");
+ return;
+ }
+ Role adminRole;
+ RawStore ms = getMS();
+ try {
+ adminRole = ms.getRole(ADMIN);
+ } catch (NoSuchObjectException e) {
+ LOG.error("Failed to retrieve just added admin role",e);
+ return;
+ }
+ while (users.hasNext()) {
+ String userName = users.next();
+ try {
+ ms.grantRole(adminRole, userName, PrincipalType.USER, ADMIN, PrincipalType.ROLE, true);
+ LOG.info("Added " + userName + " to admin role");
+ } catch (NoSuchObjectException e) {
+ LOG.error("Failed to add "+ userName + " in admin role",e);
+ } catch (InvalidObjectException e) {
+ LOG.debug(userName + " already in admin role", e);
+ }
+ }
+ }
+
+ private static void logInfo(String m) {
+ LOG.info(threadLocalId.get().toString() + ": " + m);
+ logAuditEvent(m);
+ }
+
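+ // Start-of-call bookkeeping: increments the call counters, writes the audit log
+ // line, and opens a metrics timer that the matching endFunction call closes.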
+ private String startFunction(String function, String extraLogInfo) {
+ incrementCounter(function);
+ logInfo((getThreadLocalIpAddress() == null ? "" : "source:" + getThreadLocalIpAddress() + " ") +
+ function + extraLogInfo);
+ com.codahale.metrics.Timer timer =
+ Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + function);
+ if (timer != null) {
+ // Timer will be null if we aren't using metrics
+ timerContexts.get().put(function, timer.time());
+ }
+ Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+ if (counter != null) counter.inc();
+ return function;
+ }
+
+ private String startFunction(String function) {
+ return startFunction(function, "");
+ }
+
+ private void startTableFunction(String function, String db, String tbl) {
+ startFunction(function, " : db=" + db + " tbl=" + tbl);
+ }
+
+ private void startMultiTableFunction(String function, String db, List<String> tbls) {
+ String tableNames = join(tbls, ",");
+ startFunction(function, " : db=" + db + " tbls=" + tableNames);
+ }
+
+ private void startPartitionFunction(String function, String db, String tbl,
+ List<String> partVals) {
+ startFunction(function, " : db=" + db + " tbl=" + tbl + "[" + join(partVals, ",") + "]");
+ }
+
+ private void startPartitionFunction(String function, String db, String tbl,
+ Map<String, String> partName) {
+ startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName);
+ }
+
+ private void endFunction(String function, boolean successful, Exception e) {
+ endFunction(function, successful, e, null);
+ }
+ private void endFunction(String function, boolean successful, Exception e,
+ String inputTableName) {
+ endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName));
+ }
+
+ private void endFunction(String function, MetaStoreEndFunctionContext context) {
+ com.codahale.metrics.Timer.Context timerContext = timerContexts.get().remove(function);
+ if (timerContext != null) {
+ timerContext.close();
+ }
+ Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+ if (counter != null) counter.dec();
+
+ for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+ listener.onEndFunction(function, context);
+ }
+ }
+
+ @Override
+ public fb_status getStatus() {
+ return fb_status.ALIVE;
+ }
+
+ @Override
+ public void shutdown() {
+ cleanupRawStore();
+ PerfLogger.getPerfLogger(false).cleanupPerfLogMetrics();
+ }
+
+ @Override
+ public AbstractMap<String, Long> getCounters() {
+ AbstractMap<String, Long> counters = super.getCounters();
+
+ // Allow endFunctionListeners to add any counters they have collected
+ if (endFunctionListeners != null) {
+ for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+ listener.exportCounters(counters);
+ }
+ }
+
+ return counters;
+ }
+
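+ // Validates the database name, resolves its location (creating the directory if
+ // needed), then persists it in a transaction; on failure the transaction is rolled
+ // back and any directory this call created is removed.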
+ private void create_database_core(RawStore ms, final Database db)
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
+ if (!MetaStoreUtils.validateName(db.getName(), null)) {
+ throw new InvalidObjectException(db.getName() + " is not a valid database name");
+ }
+
+ if (null == db.getLocationUri()) {
+ db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString());
+ } else {
+ db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString());
+ }
+
+ Path dbPath = new Path(db.getLocationUri());
+ boolean success = false;
+ boolean madeDir = false;
+ Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+ try {
+ firePreEvent(new PreCreateDatabaseEvent(db, this));
+ if (!wh.isDir(dbPath)) {
+ if (!wh.mkdirs(dbPath)) {
+ throw new MetaException("Unable to create database path " + dbPath +
+ ", failed to create database " + db.getName());
+ }
+ madeDir = true;
+ }
+
+ ms.openTransaction();
+ ms.createDatabase(db);
+
+ if (!transactionalListeners.isEmpty()) {
+ transactionalListenersResponses =
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+ EventType.CREATE_DATABASE,
+ new CreateDatabaseEvent(db, true, this));
+ }
+
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ if (madeDir) {
+ wh.deleteDir(dbPath, true);
+ }
+ }
+
+ if (!listeners.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners,
+ EventType.CREATE_DATABASE,
+ new CreateDatabaseEvent(db, success, this),
+ null,
+ transactionalListenersResponses, ms);
+ }
+ }
+ }
+
+ @Override
+ public void create_database(final Database db)
+ throws AlreadyExistsException, InvalidObjectException, MetaException {
+ startFunction("create_database", ": " + db.toString());
+ boolean success = false;
+ Exception ex = null;
+ try {
+ try {
+ if (null != get_database_core(db.getName())) {
+ throw new AlreadyExistsException("Database " + db.getName() + " already exists");
+ }
+ } catch (NoSuchObjectException e) {
+ // expected
+ }
+
+ if (TEST_TIMEOUT_ENABLED) {
+ try {
+ Thread.sleep(TEST_TIMEOUT_VALUE);
+ } catch (InterruptedException e) {
+ // do nothing
+ }
+ Deadline.checkTimeout();
+ }
+ create_database_core(getMS(), db);
+ success = true;
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else if (e instanceof AlreadyExistsException) {
+ throw (AlreadyExistsException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("create_database", success, ex);
+ }
+ }
+
+ @Override
+ public Database get_database(final String name) throws NoSuchObjectException, MetaException {
+ startFunction("get_database", ": " + name);
+ Database db = null;
+ Exception ex = null;
+ try {
+ db = get_database_core(name);
+ firePreEvent(new PreReadDatabaseEvent(db, this));
+ } catch (MetaException|NoSuchObjectException e) {
+ ex = e;
+ throw e;
+ } finally {
+ endFunction("get_database", db != null, ex);
+ }
+ return db;
+ }
+
+ @Override
+ public Database get_database_core(final String name) throws NoSuchObjectException,
+ MetaException {
+ Database db = null;
+ try {
+ db = getMS().getDatabase(name);
+ } catch (MetaException | NoSuchObjectException e) {
+ throw e;
+ } catch (Exception e) {
+ assert (e instanceof RuntimeException);
+ throw (RuntimeException) e;
+ }
+ return db;
+ }
+
+ @Override
+ public void alter_database(final String dbName, final Database newDB) throws TException {
+ startFunction("alter_database" + dbName);
+ boolean success = false;
+ Exception ex = null;
+
+ // Perform the same URI normalization as create_database_core.
+ if (newDB.getLocationUri() != null) {
+ newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString());
+ }
+
+ try {
+ Database oldDB = get_database_core(dbName);
+ if (oldDB == null) {
+ throw new MetaException("Could not alter database \"" + dbName + "\". Could not retrieve old definition.");
+ }
+ firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this));
+ getMS().alterDatabase(dbName, newDB);
+ success = true;
+ } catch (Exception e) {
+ ex = e;
+ rethrowException(e);
+ } finally {
+ endFunction("alter_database", success, ex);
+ }
+ }
+
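+ // Drops the database's functions, then its tables in batches (collecting table and
+ // partition locations that live outside the database directory), then the database
+ // itself; data is deleted only after the transaction commits successfully.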
+ private void drop_database_core(RawStore ms,
+ final String name, final boolean deleteData, final boolean cascade)
+ throws NoSuchObjectException, InvalidOperationException, MetaException,
+ IOException, InvalidObjectException, InvalidInputException {
+ boolean success = false;
+ Database db = null;
+ List<Path> tablePaths = new ArrayList<>();
+ List<Path> partitionPaths = new ArrayList<>();
+ Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+ try {
+ ms.openTransaction();
+ db = ms.getDatabase(name);
+
+ firePreEvent(new PreDropDatabaseEvent(db, this));
+
+ List<String> allTables = get_all_tables(db.getName());
+ List<String> allFunctions = get_functions(db.getName(), "*");
+
+ if (!cascade) {
+ if (!allTables.isEmpty()) {
+ throw new InvalidOperationException(
+ "Database " + db.getName() + " is not empty. One or more tables exist.");
+ }
+ if (!allFunctions.isEmpty()) {
+ throw new InvalidOperationException(
+ "Database " + db.getName() + " is not empty. One or more functions exist.");
+ }
+ }
+ Path path = new Path(db.getLocationUri()).getParent();
+ if (!wh.isWritable(path)) {
+ throw new MetaException("Database not dropped since " +
+ path + " is not writable by " +
+ SecurityUtils.getUser());
+ }
+
+ Path databasePath = wh.getDnsPath(wh.getDatabasePath(db));
+
+ // drop any functions before dropping db
+ for (String funcName : allFunctions) {
+ drop_function(name, funcName);
+ }
+
+ // drop tables before dropping db
+ int tableBatchSize = MetastoreConf.getIntVar(conf,
+ ConfVars.BATCH_RETRIEVE_MAX);
+
+ int startIndex = 0;
+ // retrieve the tables from the metastore in batches to alleviate memory constraints
+ while (startIndex < allTables.size()) {
+ int endIndex = Math.min(startIndex + tableBatchSize, allTables.size());
+
+ List<Table> tables;
+ try {
+ tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex));
+ } catch (UnknownDBException e) {
+ throw new MetaException(e.getMessage());
+ }
+
+ if (tables != null && !tables.isEmpty()) {
+ for (Table table : tables) {
+
+ // If the table is not external and may not be in a subdirectory of the database,
+ // add its location to the list of paths to delete
+ Path tablePath = null;
+ if (table.getSd().getLocation() != null && !isExternal(table)) {
+ tablePath = wh.getDnsPath(new Path(table.getSd().getLocation()));
+ if (!wh.isWritable(tablePath.getParent())) {
+ throw new MetaException("Database metadata not deleted since table: " +
+ table.getTableName() + " has a parent location " + tablePath.getParent() +
+ " which is not writable by " + SecurityUtils.getUser());
+ }
+
+ if (!isSubdirectory(databasePath, tablePath)) {
+ tablePaths.add(tablePath);
+ }
+ }
+
+ // For each partition in each table, drop the partitions and get a list of
+ // partitions' locations which might need to be deleted
+ partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(),
+ tablePath, table.getPartitionKeys(), deleteData && !isExternal(table));
+
+ // Drop the table but not its data
+ drop_table(name, table.getTableName(), false);
+ }
+
+ startIndex = endIndex;
+ }
+ }
+
+ if (ms.dropDatabase(name)) {
+ if (!transactionalListeners.isEmpty()) {
+ transactionalListenerResponses =
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+ EventType.DROP_DATABASE,
+ new DropDatabaseEvent(db, true, this));
+ }
+
+ success = ms.commitTransaction();
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (deleteData) {
+ // Delete the data in the partitions which have other locations
+ deletePartitionData(partitionPaths);
+ // Delete the data in the tables which have other locations
+ for (Path tablePath : tablePaths) {
+ deleteTableData(tablePath);
+ }
+ // Delete the data in the database
+ try {
+ wh.deleteDir(new Path(db.getLocationUri()), true);
+ } catch (Exception e) {
+ LOG.error("Failed to delete database directory: " + db.getLocationUri() +
+ " " + e.getMessage());
+ }
+ // it is not a terrible thing even if the data is not deleted
+ }
+
+ if (!listeners.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners,
+ EventType.DROP_DATABASE,
+ new DropDatabaseEvent(db, success, this),
+ null,
+ transactionalListenerResponses, ms);
+ }
+ }
+ }
+
+ /**
+ * Returns a BEST GUESS as to whether or not other is a subdirectory of parent. It does not
+ * take into account any intricacies of the underlying file system, which is assumed to be
+ * HDFS. This should not return any false positives, but may return false negatives.
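+ * For example, parent=hdfs://nn/warehouse/db.db and other=hdfs://nn/warehouse/db.db/t1
+ * yields true; the comparison is a pure string-prefix check on the path strings.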
+ *
+ * @param parent
+ * @param other
+ * @return
+ */
+ private boolean isSubdirectory(Path parent, Path other) {
+ return other.toString().startsWith(parent.toString().endsWith(Path.SEPARATOR) ?
+ parent.toString() : parent.toString() + Path.SEPARATOR);
+ }
+
+ @Override
+ public void drop_database(final String dbName, final boolean deleteData, final boolean cascade)
+ throws NoSuchObjectException, InvalidOperationException, MetaException {
+
+ startFunction("drop_database", ": " + dbName);
+ if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) {
+ endFunction("drop_database", false, null);
+ throw new MetaException("Can not drop default database");
+ }
+
+ boolean success = false;
+ Exception ex = null;
+ try {
+ drop_database_core(getMS(), dbName, deleteData, cascade);
+ success = true;
+ } catch (IOException e) {
+ ex = e;
+ throw new MetaException(e.getMessage());
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidOperationException) {
+ throw (InvalidOperationException) e;
+ } else if (e instanceof NoSuchObjectException) {
+ throw (NoSuchObjectException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("drop_database", success, ex);
+ }
+ }
+
+ @Override
+ public List<String> get_databases(final String pattern) throws MetaException {
+ startFunction("get_databases", ": " + pattern);
+
+ List<String> ret = null;
+ Exception ex = null;
+ try {
+ ret = getMS().getDatabases(pattern);
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("get_databases", ret != null, ex);
+ }
+ return ret;
+ }
+
+ @Override
+ public List<String> get_all_databases() throws MetaException {
+ startFunction("get_all_databases");
+
+ List<String> ret = null;
+ Exception ex = null;
+ try {
+ ret = getMS().getAllDatabases();
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("get_all_databases", ret != null, ex);
+ }
+ return ret;
+ }
+
+ private void create_type_core(final RawStore ms, final Type type)
+ throws AlreadyExistsException, MetaException, InvalidObjectException {
+ if (!MetaStoreUtils.validateName(type.getName(), null)) {
+ throw new InvalidObjectException("Invalid type name");
+ }
+
+ boolean success = false;
+ try {
+ ms.openTransaction();
+ if (is_type_exists(ms, type.getName())) {
+ throw new AlreadyExistsException("Type " + type.getName() + " already exists");
+ }
+ ms.createType(type);
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ }
+ }
+ }
+
+ @Override
+ public boolean create_type(final Type type) throws AlreadyExistsException,
+ MetaException, InvalidObjectException {
+ startFunction("create_type", ": " + type.toString());
+ boolean success = false;
+ Exception ex = null;
+ try {
+ create_type_core(getMS(), type);
+ success = true;
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else if (e instanceof AlreadyExistsException) {
+ throw (AlreadyExistsException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("create_type", success, ex);
+ }
+
+ return success;
+ }
+
+ @Override
+ public Type get_type(final String name) throws MetaException, NoSuchObjectException {
+ startFunction("get_type", ": " + name);
+
+ Type ret = null;
+ Exception ex = null;
+ try {
+ ret = getMS().getType(name);
+ if (null == ret) {
+ throw new NoSuchObjectException("Type \"" + name + "\" not found.");
+ }
+ } catch (Exception e) {
+ ex = e;
+ throwMetaException(e);
+ } finally {
+ endFunction("get_type", ret != null, ex);
+ }
+ return ret;
+ }
+
+ private boolean is_type_exists(RawStore ms, String typeName)
+ throws MetaException {
+ return (ms.getType(typeName) != null);
+ }
+
+ @Override
+ public boolean drop_type(final String name) throws MetaException, NoSuchObjectException {
+ startFunction("drop_type", ": " + name);
+
+ boolean success = false;
+ Exception ex = null;
+ try {
+ // TODO:pc validate that there are no types that refer to this
+ success = getMS().dropType(name);
+ } catch (Exception e) {
+ ex = e;
+ throwMetaException(e);
+ } finally {
+ endFunction("drop_type", success, ex);
+ }
+ return success;
+ }
+
+ @Override
+ public Map<String, Type> get_type_all(String name) throws MetaException {
+ // TODO Auto-generated method stub
+ startFunction("get_type_all", ": " + name);
+ endFunction("get_type_all", false, null);
+ throw new MetaException("Not yet implemented");
+ }
+
+ private void create_table_core(final RawStore ms, final Table tbl,
+ final EnvironmentContext envContext)
+ throws AlreadyExistsException, MetaException,
+ InvalidObjectException, NoSuchObjectException {
+ create_table_core(ms, tbl, envContext, null, null, null, null);
+ }
+
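+ // Validates table, column, partition-key and skew metadata, resolves the table
+ // location (creating the directory if needed), persists the table together with any
+ // supplied constraints, and fires the corresponding create/add-constraint events.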
+ private void create_table_core(final RawStore ms, final Table tbl,
+ final EnvironmentContext envContext, List<SQLPrimaryKey> primaryKeys,
+ List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
+ List<SQLNotNullConstraint> notNullConstraints)
+ throws AlreadyExistsException, MetaException,
+ InvalidObjectException, NoSuchObjectException {
+ if (!MetaStoreUtils.validateName(tbl.getTableName(), conf)) {
+ throw new InvalidObjectException(tbl.getTableName()
+ + " is not a valid object name");
+ }
+ String validate = MetaStoreUtils.validateTblColumns(tbl.getSd().getCols());
+ if (validate != null) {
+ throw new InvalidObjectException("Invalid column " + validate);
+ }
+ if (tbl.getPartitionKeys() != null) {
+ validate = MetaStoreUtils.validateTblColumns(tbl.getPartitionKeys());
+ if (validate != null) {
+ throw new InvalidObjectException("Invalid partition column " + validate);
+ }
+ }
+ SkewedInfo skew = tbl.getSd().getSkewedInfo();
+ if (skew != null) {
+ validate = MetaStoreUtils.validateSkewedColNames(skew.getSkewedColNames());
+ if (validate != null) {
+ throw new InvalidObjectException("Invalid skew column " + validate);
+ }
+ validate = MetaStoreUtils.validateSkewedColNamesSubsetCol(
+ skew.getSkewedColNames(), tbl.getSd().getCols());
+ if (validate != null) {
+ throw new InvalidObjectException("Invalid skew column " + validate);
+ }
+ }
+
+ Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+ Path tblPath = null;
+ boolean success = false, madeDir = false;
+ try {
+ firePreEvent(new PreCreateTableEvent(tbl, this));
+
+ ms.openTransaction();
+
+ Database db = ms.getDatabase(tbl.getDbName());
+ if (db == null) {
+ throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist");
+ }
+
+ // get_table checks whether the database exists; that check should be moved here
+ if (is_table_exists(ms, tbl.getDbName(), tbl.getTableName())) {
+ throw new AlreadyExistsException("Table " + tbl.getTableName()
+ + " already exists");
+ }
+
+ if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
+ if (tbl.getSd().getLocation() == null
+ || tbl.getSd().getLocation().isEmpty()) {
+ tblPath = wh.getDefaultTablePath(
+ ms.getDatabase(tbl.getDbName()), tbl.getTableName());
+ } else {
+ if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
+ LOG.warn("Location: " + tbl.getSd().getLocation()
+ + " specified for non-external table:" + tbl.getTableName());
+ }
+ tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
+ }
+ tbl.getSd().setLocation(tblPath.toString());
+ }
+
+ if (tblPath != null) {
+ if (!wh.isDir(tblPath)) {
+ if (!wh.mkdirs(tblPath)) {
+ throw new MetaException(tblPath
+ + " is not a directory or unable to create one");
+ }
+ madeDir = true;
+ }
+ }
+ if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
+ !MetaStoreUtils.isView(tbl)) {
+ MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, envContext);
+ }
+
+ // set create time
+ long time = System.currentTimeMillis() / 1000;
+ tbl.setCreateTime((int) time);
+ if (tbl.getParameters() == null ||
+ tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
+ tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
+ }
+ if (primaryKeys == null && foreignKeys == null
+ && uniqueConstraints == null && notNullConstraints == null) {
+ ms.createTable(tbl);
+ } else {
+ // Set constraint name if null before sending to listener
+ List<String> constraintNames = ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys,
+ uniqueConstraints, notNullConstraints);
+ int primaryKeySize = 0;
+ if (primaryKeys != null) {
+ primaryKeySize = primaryKeys.size();
+ for (int i = 0; i < primaryKeys.size(); i++) {
+ if (primaryKeys.get(i).getPk_name() == null) {
+ primaryKeys.get(i).setPk_name(constraintNames.get(i));
+ }
+ }
+ }
+ int foreignKeySize = 0;
+ if (foreignKeys != null) {
+ foreignKeySize = foreignKeys.size();
+ for (int i = 0; i < foreignKeySize; i++) {
+ if (foreignKeys.get(i).getFk_name() == null) {
+ foreignKeys.get(i).setFk_name(constraintNames.get(primaryKeySize + i));
+ }
+ }
+ }
+ int uniqueConstraintSize = 0;
+ if (uniqueConstraints != null) {
+ uniqueConstraintSize = uniqueConstraints.size();
+ for (int i = 0; i < uniqueConstraintSize; i++) {
+ if (uniqueConstraints.get(i).getUk_name() == null) {
+ uniqueConstraints.get(i).setUk_name(constraintNames.get(primaryKeySize + foreignKeySize + i));
+ }
+ }
+ }
+ if (notNullConstraints != null) {
+ for (int i = 0; i < notNullConstraints.size(); i++) {
+ if (notNullConstraints.get(i).getNn_name() == null) {
+ notNullConstraints.get(i).setNn_name(constraintNames.get(primaryKeySize + foreignKeySize + uniqueConstraintSize + i));
+ }
+ }
+ }
+ }
+
+ if (!transactionalListeners.isEmpty()) {
+ transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+ EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext);
+ if (primaryKeys != null && !primaryKeys.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY,
+ new AddPrimaryKeyEvent(primaryKeys, true, this), envContext);
+ }
+ if (foreignKeys != null && !foreignKeys.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_FOREIGNKEY,
+ new AddForeignKeyEvent(foreignKeys, true, this), envContext);
+ }
+ if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_UNIQUECONSTRAINT,
+ new AddUniqueConstraintEvent(uniqueConstraints, true, this), envContext);
+ }
+ if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_NOTNULLCONSTRAINT,
+ new AddNotNullConstraintEvent(notNullConstraints, true, this), envContext);
+ }
+ }
+
+ success = ms.commitTransaction();
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ if (madeDir) {
+ wh.deleteDir(tblPath, true);
+ }
+ }
+
+ if (!listeners.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE,
+ new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms);
+ if (primaryKeys != null && !primaryKeys.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY,
+ new AddPrimaryKeyEvent(primaryKeys, success, this), envContext);
+ }
+ if (foreignKeys != null && !foreignKeys.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_FOREIGNKEY,
+ new AddForeignKeyEvent(foreignKeys, success, this), envContext);
+ }
+ if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_UNIQUECONSTRAINT,
+ new AddUniqueConstraintEvent(uniqueConstraints, success, this), envContext);
+ }
+ if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_NOTNULLCONSTRAINT,
+ new AddNotNullConstraintEvent(notNullConstraints, success, this), envContext);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void create_table(final Table tbl) throws AlreadyExistsException,
+ MetaException, InvalidObjectException {
+ create_table_with_environment_context(tbl, null);
+ }
+
+ @Override
+ public void create_table_with_environment_context(final Table tbl,
+ final EnvironmentContext envContext)
+ throws AlreadyExistsException, MetaException, InvalidObjectException {
+ startFunction("create_table", ": " + tbl.toString());
+ boolean success = false;
+ Exception ex = null;
+ try {
+ create_table_core(getMS(), tbl, envContext);
+ success = true;
+ } catch (NoSuchObjectException e) {
+ ex = e;
+ throw new InvalidObjectException(e.getMessage());
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else if (e instanceof AlreadyExistsException) {
+ throw (AlreadyExistsException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("create_table", success, ex, tbl.getTableName());
+ }
+ }
+
+ @Override
+ public void create_table_with_constraints(final Table tbl,
+ final List<SQLPrimaryKey> primaryKeys, final List<SQLForeignKey> foreignKeys,
+ List<SQLUniqueConstraint> uniqueConstraints,
+ List<SQLNotNullConstraint> notNullConstraints)
+ throws AlreadyExistsException, MetaException, InvalidObjectException {
+ startFunction("create_table", ": " + tbl.toString());
+ boolean success = false;
+ Exception ex = null;
+ try {
+ create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys,
+ uniqueConstraints, notNullConstraints);
+ success = true;
+ } catch (NoSuchObjectException e) {
+ ex = e;
+ throw new InvalidObjectException(e.getMessage());
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else if (e instanceof AlreadyExistsException) {
+ throw (AlreadyExistsException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ endFunction("create_table", success, ex, tbl.getTableName());
+ }
+ }
+
+ @Override
+ public void drop_constraint(DropConstraintRequest req)
+ throws MetaException, InvalidObjectException {
+ String dbName = req.getDbname();
+ String tableName = req.getTablename();
+ String constraintName = req.getConstraintname();
+ startFunction("drop_constraint", ": " + constraintName);
+ boolean success = false;
+ Exception ex = null;
+ RawStore ms = getMS();
+ try {
+ ms.openTransaction();
+ ms.dropConstraint(dbName, tableName, constraintName);
+ if (transactionalListeners.size() > 0) {
+ DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName,
+ tableName, constraintName, true, this);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onDropConstraint(dropConstraintEvent);
+ }
+ }
+ success = ms.commitTransaction();
+ } catch (NoSuchObjectException e) {
+ ex = e;
+ throw new InvalidObjectException(e.getMessage());
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else {
+ for (MetaStoreEventListener listener : listeners) {
+ DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName,
+ tableName, constraintName, true, this);
+ listener.onDropConstraint(dropConstraintEvent);
+ }
+ }
+ endFunction("drop_constraint", success, ex, constraintName);
+ }
+ }
+
+ @Override
+ public void add_primary_key(AddPrimaryKeyRequest req)
+ throws MetaException, InvalidObjectException {
+ List<SQLPrimaryKey> primaryKeyCols = req.getPrimaryKeyCols();
+ String constraintName = (primaryKeyCols != null && primaryKeyCols.size() > 0) ?
+ primaryKeyCols.get(0).getPk_name() : "null";
+ startFunction("add_primary_key", ": " + constraintName);
+ boolean success = false;
+ Exception ex = null;
+ RawStore ms = getMS();
+ try {
+ ms.openTransaction();
+ List<String> constraintNames = ms.addPrimaryKeys(primaryKeyCols);
+ // Set primary key name if null before sending to listener
+ if (primaryKeyCols != null) {
+ for (int i = 0; i < primaryKeyCols.size(); i++) {
+ if (primaryKeyCols.get(i).getPk_name() == null) {
+ primaryKeyCols.get(i).setPk_name(constraintNames.get(i));
+ }
+ }
+ }
+ if (transactionalListeners.size() > 0) {
+ if (primaryKeyCols != null && primaryKeyCols.size() > 0) {
+ AddPrimaryKeyEvent addPrimaryKeyEvent = new AddPrimaryKeyEvent(primaryKeyCols, true, this);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAddPrimaryKey(addPrimaryKeyEvent);
+ }
+ }
+ }
+ success = ms.commitTransaction();
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (primaryKeyCols != null && primaryKeyCols.size() > 0) {
+ for (MetaStoreEventListener listener : listeners) {
+ AddPrimaryKeyEvent addPrimaryKeyEvent = new AddPrimaryKeyEvent(primaryKeyCols, true, this);
+ listener.onAddPrimaryKey(addPrimaryKeyEvent);
+ }
+ }
+ endFunction("add_primary_key", success, ex, constraintName);
+ }
+ }
+
+ @Override
+ public void add_foreign_key(AddForeignKeyRequest req)
+ throws MetaException, InvalidObjectException {
+ List<SQLForeignKey> foreignKeyCols = req.getForeignKeyCols();
+ String constraintName = (foreignKeyCols != null && foreignKeyCols.size() > 0) ?
+ foreignKeyCols.get(0).getFk_name() : "null";
+ startFunction("add_foreign_key", ": " + constraintName);
+ boolean success = false;
+ Exception ex = null;
+ RawStore ms = getMS();
+ try {
+ ms.openTransaction();
+ List<String> constraintNames = ms.addForeignKeys(foreignKeyCols);
+ // Set foreign key name if null before sending to listener
+ if (foreignKeyCols != null) {
+ for (int i = 0; i < foreignKeyCols.size(); i++) {
+ if (foreignKeyCols.get(i).getFk_name() == null) {
+ foreignKeyCols.get(i).setFk_name(constraintNames.get(i));
+ }
+ }
+ }
+ if (transactionalListeners.size() > 0) {
+ if (foreignKeyCols != null && foreignKeyCols.size() > 0) {
+ AddForeignKeyEvent addForeignKeyEvent = new AddForeignKeyEvent(foreignKeyCols, true, this);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAddForeignKey(addForeignKeyEvent);
+ }
+ }
+ }
+ success = ms.commitTransaction();
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (foreignKeyCols != null && foreignKeyCols.size() > 0) {
+ for (MetaStoreEventListener listener : listeners) {
+ AddForeignKeyEvent addForeignKeyEvent = new AddForeignKeyEvent(foreignKeyCols, true, this);
+ listener.onAddForeignKey(addForeignKeyEvent);
+ }
+ }
+ endFunction("add_foreign_key", success, ex, constraintName);
+ }
+ }
+
+ @Override
+ public void add_unique_constraint(AddUniqueConstraintRequest req)
+ throws MetaException, InvalidObjectException {
+ List<SQLUniqueConstraint> uniqueConstraintCols = req.getUniqueConstraintCols();
+ String constraintName = (uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) ?
+ uniqueConstraintCols.get(0).getUk_name() : "null";
+ startFunction("add_unique_constraint", ": " + constraintName);
+ boolean success = false;
+ Exception ex = null;
+ RawStore ms = getMS();
+ try {
+ ms.openTransaction();
+ List<String> constraintNames = ms.addUniqueConstraints(uniqueConstraintCols);
+ // Set unique constraint name if null before sending to listener
+ if (uniqueConstraintCols != null) {
+ for (int i = 0; i < uniqueConstraintCols.size(); i++) {
+ if (uniqueConstraintCols.get(i).getUk_name() == null) {
+ uniqueConstraintCols.get(i).setUk_name(constraintNames.get(i));
+ }
+ }
+ }
+ if (transactionalListeners.size() > 0) {
+ if (uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) {
+ AddUniqueConstraintEvent addUniqueConstraintEvent = new AddUniqueConstraintEvent(uniqueConstraintCols, true, this);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAddUniqueConstraint(addUniqueConstraintEvent);
+ }
+ }
+ }
+ success = ms.commitTransaction();
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) {
+ for (MetaStoreEventListener listener : listeners) {
+ AddUniqueConstraintEvent addUniqueConstraintEvent = new AddUniqueConstraintEvent(uniqueConstraintCols, true, this);
+ listener.onAddUniqueConstraint(addUniqueConstraintEvent);
+ }
+ }
+ endFunction("add_unique_constraint", success, ex, constraintName);
+ }
+ }
+
+ @Override
+ public void add_not_null_constraint(AddNotNullConstraintRequest req)
+ throws MetaException, InvalidObjectException {
+ List<SQLNotNullConstraint> notNullConstraintCols = req.getNotNullConstraintCols();
+ String constraintName = (notNullConstraintCols != null && notNullConstraintCols.size() > 0) ?
+ notNullConstraintCols.get(0).getNn_name() : "null";
+ startFunction("add_not_null_constraint", ": " + constraintName);
+ boolean success = false;
+ Exception ex = null;
+ RawStore ms = getMS();
+ try {
+ ms.openTransaction();
+ List<String> constraintNames = ms.addNotNullConstraints(notNullConstraintCols);
+ // Set not null constraint name if null before sending to listener
+ if (notNullConstraintCols != null) {
+ for (int i = 0; i < notNullConstraintCols.size(); i++) {
+ if (notNullConstraintCols.get(i).getNn_name() == null) {
+ notNullConstraintCols.get(i).setNn_name(constraintNames.get(i));
+ }
+ }
+ }
+ if (transactionalListeners.size() > 0) {
+ if (notNullConstraintCols != null && notNullConstraintCols.size() > 0) {
+ AddNotNullConstraintEvent addNotNullConstraintEvent = new AddNotNullConstraintEvent(notNullConstraintCols, true, this);
+ for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+ transactionalListener.onAddNotNullConstraint(addNotNullConstraintEvent);
+ }
+ }
+ }
+ success = ms.commitTransaction();
+ } catch (Exception e) {
+ ex = e;
+ if (e instanceof MetaException) {
+ throw (MetaException) e;
+ } else if (e instanceof InvalidObjectException) {
+ throw (InvalidObjectException) e;
+ } else {
+ throw newMetaException(e);
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (notNullConstraintCols != null && notNullConstraintCols.size() > 0) {
+ for (MetaStoreEventListener listener : listeners) {
+ AddNotNullConstraintEvent addNotNullConstraintEvent = new AddNotNullConstraintEvent(notNullConstraintCols, true, this);
+ listener.onAddNotNullConstraint(addNotNullConstraintEvent);
+ }
+ }
+ endFunction("add_not_null_constraint", success, ex, constraintName);
+ }
+ }
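
All four add_*_constraint endpoints above share one shape: open a RawStore transaction, persist the constraint, backfill any server-generated names, notify transactional listeners inside the transaction, and notify plain listeners only after a successful commit. A minimal client-side sketch of exercising one of them (table and column names are illustrative, and client is assumed to be an IMetaStoreClient from the same release):

    SQLPrimaryKey pk = new SQLPrimaryKey();
    pk.setTable_db("default");
    pk.setTable_name("orders");
    pk.setColumn_name("order_id");
    pk.setKey_seq(1);
    pk.setPk_name("pk_orders"); // leave null to let the server generate a name
    client.addPrimaryKey(new AddPrimaryKeyRequest(Collections.singletonList(pk)));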
+
+ private boolean is_table_exists(RawStore ms, String dbname, String name)
+ throws MetaException {
+ return (ms.getTable(dbname, name) != null);
+ }
+
+ private boolean drop_table_core(final RawStore ms, final String dbname, final String name,
+ final boolean deleteData, final EnvironmentContext envContext,
+ final String indexName) throws NoSuchObjectException,
+ MetaException, IOException, InvalidObjectException, InvalidInputException {
+ boolean success = false;
+ boolean isExternal = false;
+ Path tblPath = null;
+ List<Path> partPaths = null;
+ Table tbl = null;
+ boolean ifPurge = false;
+ Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+ try {
+ ms.openTransaction();
+ // drop any partitions
+ tbl = get_table_core(dbname, name);
+ if (tbl == null) {
+ throw new NoSuchObjectException(name + " doesn't exist");
+ }
+ if (tbl.getSd() == null) {
+ throw new MetaException("Table metadata is corrupted");
+ }
+ ifPurge = isMustPurge(envContext, tbl);
+
+ firePreEvent(new PreDropTableEvent(tbl, deleteData, this));
+
+ boolean isIndexTable = isIndexTable(tbl);
+ if (indexName == null && isIndexTable) {
+ throw new RuntimeException(
+ "The table " + name + " is an index table. Please do drop index instead.");
+ }
+
+ if (!isIndexTable) {
+ try {
+ List<Index> indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE);
+ while (indexes != null && indexes.size() > 0) {
+ for (Index idx : indexes) {
+ this.drop_index_by_name(dbname, name, idx.getIndexName(), true);
+ }
+ indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE);
+ }
+ } catch (TException e) {
+ throw new MetaException(e.getMessage());
+ }
+ }
+ isExternal = isExternal(tbl);
+ if (tbl.getSd().getLocation() != null) {
+ tblPath = new Path(tbl.getSd().getLocation());
+ if (!wh.isWritable(tblPath.getParent())) {
+ String target = indexName == null ? "Table" : "Index table";
+ throw new MetaException(target + " metadata not deleted since " +
+ tblPath.getParent() + " is not writable by " +
+ SecurityUtils.getUser());
+ }
+ }
+
+ // Drop the partitions and get a list of locations which need to be deleted
+ partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath,
+ tbl.getPartitionKeys(), deleteData && !isExternal);
+ if (!ms.dropTable(dbname, name)) {
+ String tableName = dbname + "." + name;
+ throw new MetaException(indexName == null ? "Unable to drop table " + tableName:
+ "Unable to drop index table " + tableName + " for index " + indexName);
+ } else {
+ if (!transactionalListeners.isEmpty()) {
+ transactionalListenerResponses =
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+ EventType.DROP_TABLE,
+ new DropTableEvent(tbl, true, deleteData, this),
+ envContext);
+ }
+ success = ms.commitTransaction();
+ }
+ } finally {
+ if (!success) {
+ ms.rollbackTransaction();
+ } else if (deleteData && !isExternal) {
+ // Data needs deletion. Check if trash may be skipped.
+ // Delete the data in the partitions which have other locations
+ deletePartitionData(partPaths, ifPurge);
+ // Delete the data in the table
+ deleteTableData(tblPath, ifPurge);
+ // ok even if the data is not deleted; deletion failures are only logged
+ }
+
+ if (!listeners.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners,
+ EventType.DROP_TABLE,
+ new DropTableEvent(tbl, success, deleteData, this),
+ envContext,
+ transactionalListenerResponses, ms);
+ }
+ }
+ return success;
+ }
+
+ /**
+ * Deletes the data in a table's location; logs an error if deletion fails.
+ *
+ * @param tablePath location of the table's data
+ */
+ private void deleteTableData(Path tablePath) {
+ deleteTableData(tablePath, false);
+ }
+
+ /**
+ * Deletes the data in a table's location; logs an error if deletion fails.
+ *
+ * @param tablePath location of the table's data
+ * @param ifPurge completely purge the table (skipping trash) while removing
+ * data from the warehouse
+ */
+ private void deleteTableData(Path tablePath, boolean ifPurge) {
+
+ if (tablePath != null) {
+ try {
+ wh.deleteDir(tablePath, true, ifPurge);
+ } catch (Exception e) {
+ LOG.error("Failed to delete table directory: " + tablePath +
+ " " + e.getMessage());
+ }
+ }
+ }
+
+ /**
+ * Given a list of partitions' locations, tries to delete each one,
+ * logging an error for each deletion that fails.
+ *
+ * @param partPaths locations of the partitions' data
+ */
+ private void deletePartitionData(List<Path> partPaths) {
+ deletePartitionData(partPaths, false);
+ }
+
+ /**
+ * Given a list of partitions' locations, tries to delete each one,
+ * logging an error for each deletion that fails.
+ *
+ * @param partPaths locations of the partitions' data
+ * @param ifPurge completely purge the partition (skipping trash) while
+ * removing data from the warehouse
+ */
+ private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
+ if (partPaths != null && !partPaths.isEmpty()) {
+ for (Path partPath : partPaths) {
+ try {
+ wh.deleteDir(partPath, true, ifPurge);
+ } catch (Exception e) {
+ LOG.error("Failed to delete partition directory: " + partPath +
+ " " + e.getMessage());
+ }
+ }
+ }
+ }
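
Both delete helpers above funnel into wh.deleteDir and only log failures, so a drop never aborts after the metadata is already gone; the ifPurge flag decides whether the trash is bypassed. Illustrative calls (the path is made up):

    deletePartitionData(partPaths, true);              // purge: skip the trash entirely
    deleteTableData(new Path("/warehouse/t1"), false); // honor the trash configuration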
+
+ /**
+ * Retrieves the partitions specified by partitionKeys. If checkLocation is true, checks
+ * that every partition location which may not be a subdirectory of tablePath is
+ * writable.
+ *
+ * Drops the metadata for each partition.
+ *
+ * Provides a list of locations of partitions which may not be subdirectories of tablePath.
+ *
+ * @param ms
+ * @param dbName
+ * @param tableName
+ * @param tablePath
+ * @param partitionKeys
+ * @param checkLocation
+ * @return locations of partitions which may not be subdirectories of tablePath
+ * @throws MetaException
+ * @throws IOException
+ * @throws InvalidInputException
+ * @throws InvalidObjectException
+ * @throws NoSuchObjectException
+ */
+ private List<Path> dropPartitionsAndGetLocations(RawStore ms, String dbName,
+ String tableName, Path tablePath, List<FieldSchema> partitionKeys, boolean checkLocation)
+ throws MetaException, IOException, NoSuchObjectException, InvalidObjectException,
+ InvalidInputException {
+ int partitionBatchSize = MetastoreConf.getIntVar(conf,
+ ConfVars.BATCH_RETRIEVE_MAX);
+ Path tableDnsPath = null;
+ if (tablePath != null) {
+ tableDnsPath = wh.getDnsPath(tablePath);
+ }
+ List<Path> partPaths = new ArrayList<>();
+ Table tbl = ms.getTable(dbName, tableName);
+
+ // call dropPartition on each of the table's partitions to follow the
+ // procedure for cleanly dropping partitions.
+ while (true) {
+ List<Partition> partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize);
+ if (partsToDelete == null || partsToDelete.isEmpty()) {
+ break;
+ }
+ List<String> partNames = new ArrayList<>();
+ for (Partition part : partsToDelete) {
+ if (checkLocation && part.getSd() != null &&
+ part.getSd().getLocation() != null) {
+
+ Path partPath = wh.getDnsPath(new Path(part.getSd().getLocation()));
+ if (tableDnsPath == null ||
+ (partPath != null && !isSubdirectory(tableDnsPath, partPath))) {
+ if (!wh.isWritable(partPath.getParent())) {
+ throw new MetaException("Table metadata not deleted since the partition " +
+ Warehouse.makePartName(partitionKeys, part.getValues()) +
+ " has parent location " + partPath.getParent() + " which is not writable " +
+ "by " + SecurityUtils.getUser());
+ }
+ partPaths.add(partPath);
+ }
+ }
+ partNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
+ }
+ for (MetaStoreEventListener listener : listeners) {
+ // Historically, no drop-partition listener events were fired for public listeners in the
+ // drop-table case. Limiting to internal listeners for now, to avoid unexpected calls to
+ // public listeners.
+ if (listener instanceof HMSMetricsListener) {
+ for (@SuppressWarnings("unused") Partition part : partsToDelete) {
+ listener.onDropPartition(null);
+ }
+ }
+ }
+ ms.dropPartitions(dbName, tableName, partNames);
+ }
+
+ return partPaths;
+ }
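
The location check above leans on isSubdirectory, defined elsewhere in this class. A minimal sketch of the kind of prefix test involved (appending the separator keeps /warehouse/t1 from matching /warehouse/t10):

    private boolean isSubdirectory(Path parent, Path other) {
      // Both paths are already DNS-resolved, so string comparison is sufficient.
      return other.toString().startsWith(parent.toString() + Path.SEPARATOR);
    }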
+
+ @Override
+ public void drop_table(final String dbname, final String name, final boolean deleteData)
+ throws NoSuchObjectException, MetaException {
+ drop_table_with_environment_context(dbname, name, deleteData, null);
+ }
+
+ @Override
+ public void drop_table_with_environment_context(final String dbname, final String name,
+ final boolean deleteData, final EnvironmentContext envContext)
+ throws NoSuchObjectException, MetaException {
+ startTableFunction("drop_table", dbname, name);
+
+ boolean success = false;
+ Exception ex = null;
+ try {
+ success = drop_table_core(getMS(), dbname, name, deleteData, envContext, null);
+ } catch (IOException e) {
+ ex = e;
+ throw new MetaException(e.getMessage());
+ } catch (Exception e) {
+ ex = e;
+ throwMetaException(e);
+ } finally {
+ endFunction("drop_table", success, ex, name);
+ }
+
+ }
+
+ private void updateStatsForTruncate(Map<String,String> props, EnvironmentContext environmentContext) {
+ if (null == props) {
+ return;
+ }
+ for (String stat : StatsSetupConst.supportedStats) {
+ String statVal = props.get(stat);
+ if (statVal != null) {
+ // In the case of truncate table, we reset the stats to 0.
+ props.put(stat, "0");
+ }
+ }
+ // first mark basic stats as accurate
+ StatsSetupConst.setBasicStatsState(props, StatsSetupConst.TRUE);
+ environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
+ // then invalidate column stats
+ StatsSetupConst.clearColumnStatsState(props);
+ }
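
A minimal sketch of the effect on a parameter map (values are illustrative; the call assumes handler context):

    Map<String, String> props = new HashMap<>();
    props.put(StatsSetupConst.ROW_COUNT, "42");
    props.put(StatsSetupConst.TOTAL_SIZE, "1024");
    EnvironmentContext ctx = new EnvironmentContext();
    updateStatsForTruncate(props, ctx);
    // Every supported stat present in props is now "0"; basic stats are marked
    // accurate (the truncated table really is empty), column stats are cleared,
    // and ctx records that the stats were task-generated.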
+
+ private void alterPartitionForTruncate(final RawStore ms,
+ final String dbName,
+ final String tableName,
+ final Table table,
+ final Partition partition) throws Exception {
+ EnvironmentContext environmentContext = new EnvironmentContext();
+ updateStatsForTruncate(partition.getParameters(), environmentContext);
+
+ if (!transactionalListeners.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+ EventType.ALTER_PARTITION,
+ new AlterPartitionEvent(partition, partition, table, true, true, this));
+ }
+
+ if (!listeners.isEmpty()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners,
+ EventType.ALTER_PARTITION,
+ new AlterPartitionEvent(partition, partition, table, true, true, this));
+ }
+
+ alterHandler.alterPartition(ms, wh, dbName, tableName, null, partition, environmentContext, this);
+ }
+
+ private void alterTableStatsForTruncate(final RawStore ms,
+ final String dbName,
+ final String tableName,
+ final Table table,
+ final List<String> partNames) throws Exception {
+ if (partNames == null) {
+ if (0 != table.getPartitionKeysSize()) {
+ for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) {
+ alterPartitionForTruncate(ms, dbName, tableName, table, partition);
+
<TRUNCATED>
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 24590b9..15bd803 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -18,15 +18,12 @@
package org.apache.hadoop.hive.metastore;
-import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URL;
-import java.net.URLClassLoader;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -43,7 +40,6 @@ import com.google.common.collect.Maps;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -53,28 +49,18 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger;
-import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -82,16 +68,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.MachineList;
-import org.apache.hive.common.util.HiveStringUtils;
import org.apache.hive.common.util.ReflectionUtil;
-import javax.annotation.Nullable;
-
public class MetaStoreUtils {
private static final Logger LOG = LoggerFactory.getLogger("hive.log");
@@ -104,241 +83,20 @@ public class MetaStoreUtils {
// HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well.
public static final char[] specialCharactersInTableNames = new char[] { '/' };
- public static Table createColumnsetSchema(String name, List<String> columns,
- List<String> partCols, Configuration conf) throws MetaException {
-
- if (columns == null) {
- throw new MetaException("columns not specified for table " + name);
- }
-
- Table tTable = new Table();
- tTable.setTableName(name);
- tTable.setSd(new StorageDescriptor());
- StorageDescriptor sd = tTable.getSd();
- sd.setSerdeInfo(new SerDeInfo());
- SerDeInfo serdeInfo = sd.getSerdeInfo();
- serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName());
- serdeInfo.setParameters(new HashMap<String, String>());
- serdeInfo.getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
- Warehouse.DEFAULT_SERIALIZATION_FORMAT);
-
- List<FieldSchema> fields = new ArrayList<FieldSchema>(columns.size());
- sd.setCols(fields);
- for (String col : columns) {
- FieldSchema field = new FieldSchema(col,
- org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME, "'default'");
- fields.add(field);
- }
-
- tTable.setPartitionKeys(new ArrayList<FieldSchema>());
- for (String partCol : partCols) {
- FieldSchema part = new FieldSchema();
- part.setName(partCol);
- part.setType(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME); // default
- // partition
- // key
- tTable.getPartitionKeys().add(part);
- }
- sd.setNumBuckets(-1);
- return tTable;
- }
-
- /**
- * recursiveDelete
- *
- * just recursively deletes a dir - you'd think Java would have something to
- * do this??
- *
- * @param f
- * - the file/dir to delete
- * @exception IOException
- * propogate f.delete() exceptions
- *
- */
- static public void recursiveDelete(File f) throws IOException {
- if (f.isDirectory()) {
- File fs[] = f.listFiles();
- for (File subf : fs) {
- recursiveDelete(subf);
- }
- }
- if (!f.delete()) {
- throw new IOException("could not delete: " + f.getPath());
- }
- }
-
- /**
- * @param partParams
- * @return True if the passed Parameters Map contains values for all "Fast Stats".
- */
- private static boolean containsAllFastStats(Map<String, String> partParams) {
- for (String stat : StatsSetupConst.fastStats) {
- if (!partParams.containsKey(stat)) {
- return false;
- }
- }
- return true;
- }
-
- static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
- boolean madeDir, EnvironmentContext environmentContext) throws MetaException {
- return updateTableStatsFast(db, tbl, wh, madeDir, false, environmentContext);
- }
-
- private static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- if (tbl.getPartitionKeysSize() == 0) {
- // Update stats only when unpartitioned
- FileStatus[] fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
- return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute, environmentContext);
- } else {
- return false;
- }
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Table by querying
- * the warehouse if the passed Table does not already have values for these parameters.
- * @param tbl
- * @param fileStatus
- * @param newDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Table already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
- boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
-
- Map<String,String> params = tbl.getParameters();
-
- if ((params!=null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)){
- boolean doNotUpdateStats = Boolean.valueOf(params.get(StatsSetupConst.DO_NOT_UPDATE_STATS));
- params.remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
- tbl.setParameters(params); // to make sure we remove this marker property
- if (doNotUpdateStats){
- return false;
- }
- }
-
- boolean updated = false;
- if (forceRecompute ||
- params == null ||
- !containsAllFastStats(params)) {
- if (params == null) {
- params = new HashMap<String,String>();
- }
- if (!newDir) {
- // The table location already exists and may contain data.
- // Let's try to populate those stats that don't require full scan.
- LOG.info("Updating table stats fast for " + tbl.getTableName());
- populateQuickStats(fileStatus, params);
- LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
- if (environmentContext != null
- && environmentContext.isSetProperties()
- && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
- StatsSetupConst.STATS_GENERATED))) {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
- } else {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
- }
- }
- tbl.setParameters(params);
- updated = true;
- }
- return updated;
- }
-
public static void populateQuickStats(FileStatus[] fileStatus, Map<String, String> params) {
- int numFiles = 0;
- long tableSize = 0L;
- String s = "LOG14535 Populating quick stats for: ";
- for (FileStatus status : fileStatus) {
- s += status.getPath() + ", ";
- // don't take directories into account for quick stats
- if (!status.isDir()) {
- tableSize += status.getLen();
- numFiles += 1;
- }
- }
- LOG.info(s/*, new Exception()*/);
- params.put(StatsSetupConst.NUM_FILES, Integer.toString(numFiles));
- params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize));
+ org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.populateQuickStats(fileStatus, params);
}
- static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext)
- throws MetaException {
- return updatePartitionStatsFast(part, wh, false, false, environmentContext);
+ public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
+ boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
+ return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updateTableStatsFast(
+ tbl, fileStatus, newDir, forceRecompute, environmentContext);
}
- static boolean updatePartitionStatsFast(Partition part, Warehouse wh, boolean madeDir, EnvironmentContext environmentContext)
+ public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext)
throws MetaException {
- return updatePartitionStatsFast(part, wh, madeDir, false, environmentContext);
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Partition by querying
- * the warehouse if the passed Partition does not already have values for these parameters.
- * @param part
- * @param wh
- * @param madeDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Partition already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- private static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
- wh, madeDir, forceRecompute, environmentContext);
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Partition by querying
- * the warehouse if the passed Partition does not already have values for these parameters.
- * @param part
- * @param wh
- * @param madeDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Partition already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- Map<String,String> params = part.getParameters();
- boolean updated = false;
- if (forceRecompute ||
- params == null ||
- !containsAllFastStats(params)) {
- if (params == null) {
- params = new HashMap<String,String>();
- }
- if (!madeDir) {
- // The partition location already existed and may contain data. Lets try to
- // populate those statistics that don't require a full scan of the data.
- LOG.warn("Updating partition stats fast for: " + part.getTableName());
- FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
- populateQuickStats(fileStatus, params);
- LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
- updateBasicState(environmentContext, params);
- }
- part.setParameters(params);
- updated = true;
- }
- return updated;
- }
-
- private static void updateBasicState(EnvironmentContext environmentContext, Map<String,String>
- params) {
- if (params == null) {
- return;
- }
- if (environmentContext != null
- && environmentContext.isSetProperties()
- && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
- StatsSetupConst.STATS_GENERATED))) {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
- } else {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
- }
+ return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updatePartitionStatsFast(
+ part, wh, environmentContext);
}
/**
@@ -435,53 +193,6 @@ public class MetaStoreUtils {
}
}
- static public void deleteWHDirectory(Path path, Configuration conf,
- boolean use_trash) throws MetaException {
-
- try {
- if (!path.getFileSystem(conf).exists(path)) {
- LOG.warn("drop data called on table/partition with no directory: "
- + path);
- return;
- }
-
- if (use_trash) {
-
- int count = 0;
- Path newPath = new Path("/Trash/Current"
- + path.getParent().toUri().getPath());
-
- if (path.getFileSystem(conf).exists(newPath) == false) {
- path.getFileSystem(conf).mkdirs(newPath);
- }
-
- do {
- newPath = new Path("/Trash/Current" + path.toUri().getPath() + "."
- + count);
- if (path.getFileSystem(conf).exists(newPath)) {
- count++;
- continue;
- }
- if (path.getFileSystem(conf).rename(path, newPath)) {
- break;
- }
- } while (++count < 50);
- if (count >= 50) {
- throw new MetaException("Rename failed due to maxing out retries");
- }
- } else {
- // directly delete it
- path.getFileSystem(conf).delete(path, true);
- }
- } catch (IOException e) {
- LOG.error("Got exception trying to delete data dir: " + e);
- throw new MetaException(e.getMessage());
- } catch (MetaException e) {
- LOG.error("Got exception trying to delete data dir: " + e);
- throw e;
- }
- }
-
/**
* Given a list of partition columns and a partial mapping from
* some partition columns to values the function returns the values
@@ -536,118 +247,12 @@ public class MetaStoreUtils {
return true;
}
- static public String validateTblColumns(List<FieldSchema> cols) {
- for (FieldSchema fieldSchema : cols) {
- if (!validateColumnName(fieldSchema.getName())) {
- return "name: " + fieldSchema.getName();
- }
- String typeError = validateColumnType(fieldSchema.getType());
- if (typeError != null) {
- return typeError;
- }
- }
- return null;
- }
-
- /**
- * @return true if oldType and newType are compatible.
- * Two types are compatible if we have internal functions to cast one to another.
- */
- static private boolean areColTypesCompatible(String oldType, String newType) {
-
- /*
- * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
- * datatypes can be converted from string to any type. The map is also serialized as
- * a string, which can be read as a string as well. However, with any binary
- * serialization, this is not true.
- *
- * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
- * not blocked.
- */
-
- return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType),
- TypeInfoUtils.getTypeInfoFromTypeString(newType));
- }
-
public static final String TYPE_FROM_DESERIALIZER = "<derived from deserializer>";
- /**
- * validate column type
- *
- * if it is predefined, yes. otherwise no
- * @param type
- * @return
- */
- static public String validateColumnType(String type) {
- if (type.equals(TYPE_FROM_DESERIALIZER)) {
- return null;
- }
- int last = 0;
- boolean lastAlphaDigit = isValidTypeChar(type.charAt(last));
- for (int i = 1; i <= type.length(); i++) {
- if (i == type.length()
- || isValidTypeChar(type.charAt(i)) != lastAlphaDigit) {
- String token = type.substring(last, i);
- last = i;
- if (!hiveThriftTypeMap.contains(token)) {
- return "type: " + type;
- }
- break;
- }
- }
- return null;
- }
-
- private static boolean isValidTypeChar(char c) {
- return Character.isLetterOrDigit(c) || c == '_';
- }
-
- public static String validateSkewedColNames(List<String> cols) {
- if (CollectionUtils.isEmpty(cols)) {
- return null;
- }
- for (String col : cols) {
- if (!validateColumnName(col)) {
- return col;
- }
- }
- return null;
- }
-
- public static String validateSkewedColNamesSubsetCol(List<String> skewedColNames,
- List<FieldSchema> cols) {
- if (CollectionUtils.isEmpty(skewedColNames)) {
- return null;
- }
- List<String> colNames = new ArrayList<String>(cols.size());
- for (FieldSchema fieldSchema : cols) {
- colNames.add(fieldSchema.getName());
- }
- // make a copy
- List<String> copySkewedColNames = new ArrayList<String>(skewedColNames);
- // remove valid columns
- copySkewedColNames.removeAll(colNames);
- if (copySkewedColNames.isEmpty()) {
- return null;
- }
- return copySkewedColNames.toString();
- }
public static String getListType(String t) {
return "array<" + t + ">";
}
- public static String getMapType(String k, String v) {
- return "map<" + k + "," + v + ">";
- }
-
- public static void setSerdeParam(SerDeInfo sdi, Properties schema,
- String param) {
- String val = schema.getProperty(param);
- if (org.apache.commons.lang.StringUtils.isNotBlank(val)) {
- sdi.getParameters().put(param, val);
- }
- }
-
static HashMap<String, String> typeToThriftTypeMap;
static {
typeToThriftTypeMap = new HashMap<String, String>();
@@ -725,42 +330,6 @@ public class MetaStoreUtils {
}
/**
- * Convert FieldSchemas to Thrift DDL + column names and column types
- *
- * @param structName
- * The name of the table
- * @param fieldSchemas
- * List of fields along with their schemas
- * @return String containing "Thrift
- * DDL#comma-separated-column-names#colon-separated-columntypes
- * Example:
- * "struct result { a string, map<int,string> b}#a,b#string:map<int,string>"
- */
- public static String getFullDDLFromFieldSchema(String structName,
- List<FieldSchema> fieldSchemas) {
- StringBuilder ddl = new StringBuilder();
- ddl.append(getDDLFromFieldSchema(structName, fieldSchemas));
- ddl.append('#');
- StringBuilder colnames = new StringBuilder();
- StringBuilder coltypes = new StringBuilder();
- boolean first = true;
- for (FieldSchema col : fieldSchemas) {
- if (first) {
- first = false;
- } else {
- colnames.append(',');
- coltypes.append(':');
- }
- colnames.append(col.getName());
- coltypes.append(col.getType());
- }
- ddl.append(colnames);
- ddl.append('#');
- ddl.append(coltypes);
- return ddl.toString();
- }
-
- /**
* Convert FieldSchemas to Thrift DDL.
*/
public static String getDDLFromFieldSchema(String structName,
@@ -1106,15 +675,131 @@ public class MetaStoreUtils {
return sb.toString();
}
- public static void makeDir(Path path, HiveConf hiveConf) throws MetaException {
- FileSystem fs;
+ public static int startMetaStore() throws Exception {
+ return startMetaStore(HadoopThriftAuthBridge.getBridge(), null);
+ }
+
+ public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception {
+ int port = findFreePort();
+ startMetaStore(port, bridge, conf);
+ return port;
+ }
+
+ public static int startMetaStore(HiveConf conf) throws Exception {
+ return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
+ }
+
+ public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {
+ startMetaStore(port, bridge, null);
+ }
+
+ public static void startMetaStore(final int port,
+ final HadoopThriftAuthBridge bridge, HiveConf hiveConf)
+ throws Exception {
+ if (hiveConf == null) {
+ hiveConf = new HiveConf(HMSHandler.class);
+ }
+ final HiveConf finalHiveConf = hiveConf;
+ Thread thread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ HiveMetaStore.startMetaStore(port, bridge, finalHiveConf);
+ } catch (Throwable e) {
+ LOG.error("Metastore Thrift Server threw an exception...",e);
+ }
+ }
+ });
+ thread.setDaemon(true);
+ thread.start();
+ loopUntilHMSReady(port);
+ }
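
A minimal sketch of how a test might use these helpers (the client setup is illustrative; checked exceptions are omitted):

    int port = MetaStoreUtils.startMetaStore();  // daemon thread + readiness loop
    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);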
+
+ /**
+ * A simple connect test to make sure that the metastore is up.
+ * @throws Exception if the metastore cannot be reached within the retry window
+ */
+ private static void loopUntilHMSReady(int port) throws Exception {
+ int retries = 0;
+ Exception exc = null;
+ while (true) {
+ try {
+ Socket socket = new Socket();
+ socket.connect(new InetSocketAddress(port), 5000);
+ socket.close();
+ return;
+ } catch (Exception e) {
+ if (retries++ > 60) { //give up
+ exc = e;
+ break;
+ }
+ Thread.sleep(1000);
+ }
+ }
+ // something is preventing metastore from starting
+ // print the stack from all threads for debugging purposes
+ LOG.error("Unable to connect to metastore server: " + exc.getMessage());
+ LOG.info("Printing all thread stack traces for debugging before throwing exception.");
+ LOG.info(getAllThreadStacksAsString());
+ throw exc;
+ }
+
+ private static String getAllThreadStacksAsString() {
+ Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
+ StringBuilder sb = new StringBuilder();
+ for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
+ Thread t = entry.getKey();
+ sb.append(System.lineSeparator());
+ sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState());
+ addStackString(entry.getValue(), sb);
+ }
+ return sb.toString();
+ }
+
+ private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) {
+ sb.append(System.lineSeparator());
+ for (StackTraceElement stackElem : stackElems) {
+ sb.append(stackElem).append(System.lineSeparator());
+ }
+ }
+
+ /**
+ * Finds a free port on the machine. Note that the returned port may be
+ * taken again between this check and the caller binding it.
+ *
+ * @return a port number that was free at the time of the check
+ * @throws IOException if a probe socket cannot be opened
+ */
+ public static int findFreePort() throws IOException {
+ ServerSocket socket = new ServerSocket(0);
+ int port = socket.getLocalPort();
+ socket.close();
+ return port;
+ }
+
+ /**
+ * Finds a free port on the machine, while guaranteeing that the returned
+ * port is never the specified excluded port.
+ */
+ public static int findFreePortExcepting(int portToExclude) throws IOException {
+ ServerSocket socket1 = null;
+ ServerSocket socket2 = null;
try {
- fs = path.getFileSystem(hiveConf);
- if (!fs.exists(path)) {
- fs.mkdirs(path);
+ socket1 = new ServerSocket(0);
+ socket2 = new ServerSocket(0);
+ if (socket1.getLocalPort() != portToExclude) {
+ return socket1.getLocalPort();
+ }
+ // If we're here, then socket1.getLocalPort was the port to exclude
+ // Since both sockets were open together at a point in time, we're
+ // guaranteed that socket2.getLocalPort() is not the same.
+ return socket2.getLocalPort();
+ } finally {
+ if (socket1 != null) {
+ socket1.close();
+ }
+ if (socket2 != null) {
+ socket2.close();
}
- } catch (IOException e) {
- throw new MetaException("Unable to : " + path);
}
}
@@ -1224,52 +909,12 @@ public class MetaStoreUtils {
return "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
}
- /**
- * Determines whether a table is an immutable table.
- * Immutable tables are write-once/replace, and do not support append. Partitioned
- * immutable tables do support additions by way of creation of new partitions, but
- * do not allow the partitions themselves to be appended to. "INSERT INTO" will not
- * work for Immutable tables.
- *
- * @param table table of interest
- *
- * @return true if immutable
- */
- public static boolean isImmutableTable(Table table) {
- if (table == null){
- return false;
- }
- Map<String, String> params = table.getParameters();
- if (params == null) {
- return false;
- }
-
- return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_IMMUTABLE));
- }
-
public static boolean isArchived(
org.apache.hadoop.hive.metastore.api.Partition part) {
Map<String, String> params = part.getParameters();
return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED));
}
- public static Path getOriginalLocation(
- org.apache.hadoop.hive.metastore.api.Partition part) {
- Map<String, String> params = part.getParameters();
- assert(isArchived(part));
- String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION);
- assert( originalLocation != null);
-
- return new Path(originalLocation);
- }
-
- public static boolean isNonNativeTable(Table table) {
- if (table == null || table.getParameters() == null) {
- return false;
- }
- return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null);
- }
-
/**
* Filter that filters out hidden files
*/
@@ -1300,29 +945,6 @@ public class MetaStoreUtils {
return true;
}
- /**
- * Returns true if partial has the same values as full for all values that
- * aren't empty in partial.
- */
-
- public static boolean pvalMatches(List<String> partial, List<String> full) {
- if(partial.size() > full.size()) {
- return false;
- }
- Iterator<String> p = partial.iterator();
- Iterator<String> f = full.iterator();
-
- while(p.hasNext()) {
- String pval = p.next();
- String fval = f.next();
-
- if (pval.length() != 0 && !pval.equals(fval)) {
- return false;
- }
- }
- return true;
- }
-
public static String getIndexTableName(String dbName, String baseTblName, String indexName) {
return dbName + "__" + baseTblName + "_" + indexName + "__";
}
@@ -1341,26 +963,6 @@ public class MetaStoreUtils {
return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
}
- /**
- * Given a map of partition column names to values, this creates a filter
- * string that can be used to call the *byFilter methods
- * @param m
- * @return the filter string
- */
- public static String makeFilterStringFromMap(Map<String, String> m) {
- StringBuilder filter = new StringBuilder();
- for (Entry<String, String> e : m.entrySet()) {
- String col = e.getKey();
- String val = e.getValue();
- if (filter.length() == 0) {
- filter.append(col + "=\"" + val + "\"");
- } else {
- filter.append(" and " + col + "=\"" + val + "\"");
- }
- }
- return filter.toString();
- }
-
public static boolean isView(Table table) {
if (table == null) {
return false;
@@ -1368,42 +970,6 @@ public class MetaStoreUtils {
return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
}
- /**
- * create listener instances as per the configuration.
- *
- * @param clazz
- * @param conf
- * @param listenerImplList
- * @return
- * @throws MetaException
- */
- static <T> List<T> getMetaStoreListeners(Class<T> clazz,
- HiveConf conf, String listenerImplList) throws MetaException {
- List<T> listeners = new ArrayList<T>();
-
- if (StringUtils.isBlank(listenerImplList)) {
- return listeners;
- }
-
- String[] listenerImpls = listenerImplList.split(",");
- for (String listenerImpl : listenerImpls) {
- try {
- T listener = (T) Class.forName(
- listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor(
- Configuration.class).newInstance(conf);
- listeners.add(listener);
- } catch (InvocationTargetException ie) {
- throw new MetaException("Failed to instantiate listener named: "+
- listenerImpl + ", reason: " + ie.getCause());
- } catch (Exception e) {
- throw new MetaException("Failed to instantiate listener named: "+
- listenerImpl + ", reason: " + e);
- }
- }
-
- return listeners;
- }
-
@SuppressWarnings("unchecked")
public static Class<? extends RawStore> getClass(String rawStoreClassName)
throws MetaException {
@@ -1447,24 +1013,6 @@ public class MetaStoreUtils {
}
}
- public static void validatePartitionNameCharacters(List<String> partVals,
- Pattern partitionValidationPattern) throws MetaException {
-
- String invalidPartitionVal =
- HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern);
- if (invalidPartitionVal != null) {
- throw new MetaException("Partition value '" + invalidPartitionVal +
- "' contains a character " + "not matched by whitelist pattern '" +
- partitionValidationPattern.toString() + "'. " + "(configure with " +
- HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname + ")");
- }
- }
-
- public static boolean partitionNameHasValidCharacters(List<String> partVals,
- Pattern partitionValidationPattern) {
- return HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null;
- }
-
/**
* @param schema1: The first schema to be compared
* @param schema2: The second schema to be compared
@@ -1537,117 +1085,6 @@ public class MetaStoreUtils {
return names;
}
- /**
- * Helper function to transform Nulls to empty strings.
- */
- private static final com.google.common.base.Function<String,String> transFormNullsToEmptyString
- = new com.google.common.base.Function<String, String>() {
- @Override
- public java.lang.String apply(@Nullable java.lang.String string) {
- return StringUtils.defaultString(string);
- }
- };
-
- /**
- * Create a URL from a string representing a path to a local file.
- * The path string can be just a path, or can start with file:/, file:///
- * @param onestr path string
- * @return
- */
- private static URL urlFromPathString(String onestr) {
- URL oneurl = null;
- try {
- if (onestr.startsWith("file:/")) {
- oneurl = new URL(onestr);
- } else {
- oneurl = new File(onestr).toURL();
- }
- } catch (Exception err) {
- LOG.error("Bad URL " + onestr + ", ignoring path");
- }
- return oneurl;
- }
-
- /**
- * Add new elements to the classpath.
- *
- * @param newPaths
- * Array of classpath elements
- */
- public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception {
- URLClassLoader loader = (URLClassLoader) cloader;
- List<URL> curPath = Arrays.asList(loader.getURLs());
- ArrayList<URL> newPath = new ArrayList<URL>(curPath.size());
-
- // get a list with the current classpath components
- for (URL onePath : curPath) {
- newPath.add(onePath);
- }
- curPath = newPath;
-
- for (String onestr : newPaths) {
- URL oneurl = urlFromPathString(onestr);
- if (oneurl != null && !curPath.contains(oneurl)) {
- curPath.add(oneurl);
- }
- }
-
- return new URLClassLoader(curPath.toArray(new URL[0]), loader);
- }
-
- protected static void getMergableCols(ColumnStatistics csNew, Map<String, String> parameters) {
- List<ColumnStatisticsObj> list = new ArrayList<>();
- for (int index = 0; index < csNew.getStatsObj().size(); index++) {
- ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
- // canColumnStatsMerge guarantees that it is accurate before we do merge
- if (StatsSetupConst.canColumnStatsMerge(parameters, statsObjNew.getColName())) {
- list.add(statsObjNew);
- }
- // in all the other cases, we can not merge
- }
- csNew.setStatsObj(list);
- }
-
- // this function will merge csOld into csNew.
- public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
- throws InvalidObjectException {
- List<ColumnStatisticsObj> list = new ArrayList<>();
- if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) {
- // Some of the columns' stats are missing
- // This implies partition schema has changed. We will merge columns
- // present in both, overwrite stats for columns absent in metastore and
- // leave alone columns stats missing from stats task. This last case may
- // leave stats in stale state. This will be addressed later.
- LOG.debug("New ColumnStats size is {}, but old ColumnStats size is {}",
- csNew.getStatsObj().size(), csOld.getStatsObjSize());
- }
- // In this case, we have to find out which columns can be merged.
- Map<String, ColumnStatisticsObj> map = new HashMap<>();
- // We build a hash map from colName to object for old ColumnStats.
- for (ColumnStatisticsObj obj : csOld.getStatsObj()) {
- map.put(obj.getColName(), obj);
- }
- for (int index = 0; index < csNew.getStatsObj().size(); index++) {
- ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
- ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName());
- if (statsObjOld != null) {
- // because we already confirm that the stats is accurate
- // it is impossible that the column types have been changed while the
- // column stats is still accurate.
- assert (statsObjNew.getStatsData().getSetField() == statsObjOld.getStatsData()
- .getSetField());
- // If statsObjOld is found, we can merge.
- ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew,
- statsObjOld);
- merger.merge(statsObjNew, statsObjOld);
- }
- // If statsObjOld is not found, we just use statsObjNew as it is accurate.
- list.add(statsObjNew);
- }
- // in all the other cases, we can not merge
- csNew.setStatsObj(list);
- }
-
public static List<String> getColumnNames(List<FieldSchema> schema) {
List<String> cols = new ArrayList<>(schema.size());
for (FieldSchema fs : schema) {
@@ -1655,32 +1092,4 @@ public class MetaStoreUtils {
}
return cols;
}
-
- /**
- * Verify if the user is allowed to make DB notification related calls.
- * Only the superusers defined in the Hadoop proxy user settings have the permission.
- *
- * @param user the short user name
- * @param conf that contains the proxy user settings
- * @return if the user has the permission
- */
- public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) {
- DefaultImpersonationProvider sip = ProxyUsers.getDefaultImpersonationProvider();
- // Just need to initialize the ProxyUsers for the first time, given that the conf will not change on the fly
- if (sip == null) {
- ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
- sip = ProxyUsers.getDefaultImpersonationProvider();
- }
- Map<String, Collection<String>> proxyHosts = sip.getProxyHosts();
- Collection<String> hostEntries = proxyHosts.get(sip.getProxySuperuserIpConfKey(user));
- MachineList machineList = new MachineList(hostEntries);
- ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress;
- return machineList.includes(ipAddress);
- }
-
- /** Duplicates AcidUtils; used in a couple places in metastore. */
- public static boolean isInsertOnlyTableParam(Map<String, String> params) {
- String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
- return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
- }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
new file mode 100644
index 0000000..80fae28
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.StringUtils;
+
+import java.util.List;
+
+public class SerDeStorageSchemaReader implements StorageSchemaReader {
+ @Override
+ public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext, Configuration conf)
+ throws MetaException {
+ ClassLoader orgHiveLoader = null;
+ try {
+ if (envContext != null && envContext.getProperties() != null) {
+ String addedJars = envContext.getProperties().get("hive.added.jars.path");
+ if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+ // for thread safety
+ orgHiveLoader = conf.getClassLoader();
+ ClassLoader loader = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.addToClassPath(
+ orgHiveLoader, org.apache.commons.lang.StringUtils.split(addedJars, ","));
+ conf.setClassLoader(loader);
+ }
+ }
+
+ Deserializer s = MetaStoreUtils.getDeserializer(conf, tbl, false);
+ return MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), s);
+ } catch (Exception e) {
+ // include the full stack trace in the message instead of discarding it
+ throw new MetaException(StringUtils.stringifyException(e));
+ } finally {
+ if (orgHiveLoader != null) {
+ conf.setClassLoader(orgHiveLoader);
+ }
+ }
+ }
+}
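
A minimal sketch of driving the reader directly; tbl is assumed to describe an existing table whose SerDe class is already on the classpath, and passing a null EnvironmentContext skips the added-jars handling:

    StorageSchemaReader reader = new SerDeStorageSchemaReader();
    List<FieldSchema> cols = reader.readSchema(tbl, null, conf);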
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
deleted file mode 100644
index 38b0875..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.lang.reflect.InvocationTargetException;
-import java.net.Socket;
-
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
-
-/**
- * TSetIpAddressProcessor passes the IP address of the Thrift client to the HMSHandler.
- */
-public class TSetIpAddressProcessor<I extends Iface> extends ThriftHiveMetastore.Processor<Iface> {
-
- @SuppressWarnings("unchecked")
- public TSetIpAddressProcessor(I iface) throws SecurityException, NoSuchFieldException,
- IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
- InvocationTargetException {
- super(iface);
- }
-
- @Override
- public boolean process(final TProtocol in, final TProtocol out) throws TException {
- setIpAddress(in);
-
- return super.process(in, out);
- }
-
- protected void setIpAddress(final TProtocol in) {
- TTransport transport = in.getTransport();
- if (!(transport instanceof TSocket)) {
- return;
- }
- setIpAddress(((TSocket)transport).getSocket());
- }
-
- protected void setIpAddress(final Socket inSocket) {
- HMSHandler.setThreadLocalIpAddress(inSocket.getInetAddress().getHostAddress());
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
deleted file mode 100644
index 64f0b96..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.net.Socket;
-import java.security.PrivilegedExceptionAction;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_args;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_result;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.ProcessFunction;
-import org.apache.thrift.TApplicationException;
-import org.apache.thrift.TBase;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TMessage;
-import org.apache.thrift.protocol.TMessageType;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.protocol.TProtocolUtil;
-import org.apache.thrift.protocol.TType;
-
-/** TUGIBasedProcessor is used in insecure mode for Thrift metastore client-server communication.
- * This processor checks whether the first RPC call after the connection is set up is set_ugi(),
- * through which the client sends its ugi to the server. The processor then performs all
- * subsequent RPCs on the connection using ugi.doAs(), so all actions run in the client
- * user's context.
- * Note that old clients never call set_ugi(), so no ugi is received on the server side;
- * in that case the server keeps its previous behavior and continues as usual.
- */
-@SuppressWarnings("rawtypes")
-public class TUGIBasedProcessor<I extends Iface> extends TSetIpAddressProcessor<Iface> {
-
- private final I iface;
- private final Map<String, org.apache.thrift.ProcessFunction<Iface, ? extends TBase>>
- functions;
- static final Logger LOG = LoggerFactory.getLogger(TUGIBasedProcessor.class);
-
- public TUGIBasedProcessor(I iface) throws SecurityException, NoSuchFieldException,
- IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
- InvocationTargetException {
- super(iface);
- this.iface = iface;
- this.functions = getProcessMapView();
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public boolean process(final TProtocol in, final TProtocol out) throws TException {
- setIpAddress(in);
-
- final TMessage msg = in.readMessageBegin();
- final ProcessFunction<Iface, ? extends TBase> fn = functions.get(msg.name);
- if (fn == null) {
- TProtocolUtil.skip(in, TType.STRUCT);
- in.readMessageEnd();
- TApplicationException x = new TApplicationException(TApplicationException.UNKNOWN_METHOD,
- "Invalid method name: '"+msg.name+"'");
- out.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
- x.write(out);
- out.writeMessageEnd();
- out.getTransport().flush();
- return true;
- }
- TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
- // Store ugi in transport if the rpc is set_ugi
- if (msg.name.equalsIgnoreCase("set_ugi")){
- try {
- handleSetUGI(ugiTrans, (set_ugi<Iface>)fn, msg, in, out);
- } catch (TException e) {
- throw e;
- } catch (Exception e) {
- throw new TException(e.getCause());
- }
- return true;
- }
- UserGroupInformation clientUgi = ugiTrans.getClientUGI();
- if (null == clientUgi){
- // At this point the transport must contain the client ugi; if it doesn't, it's an old client.
- fn.process(msg.seqid, in, out, iface);
- return true;
- } else { // Found ugi, perform doAs().
- PrivilegedExceptionAction<Void> pvea = new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() {
- try {
- fn.process(msg.seqid,in, out, iface);
- return null;
- } catch (TException te) {
- throw new RuntimeException(te);
- }
- }
- };
- try {
- clientUgi.doAs(pvea);
- return true;
- } catch (RuntimeException rte) {
- if (rte.getCause() instanceof TException) {
- throw (TException)rte.getCause();
- }
- throw rte;
- } catch (InterruptedException ie) {
- throw new RuntimeException(ie); // unexpected!
- } catch (IOException ioe) {
- throw new RuntimeException(ioe); // unexpected!
- } finally {
- try {
- FileSystem.closeAllForUGI(clientUgi);
- } catch (IOException e) {
- LOG.error("Could not clean up file-system handles for UGI: " + clientUgi, e);
- }
- }
- }
- }
-
- private void handleSetUGI(TUGIContainingTransport ugiTrans,
- set_ugi<Iface> fn, TMessage msg, TProtocol iprot, TProtocol oprot)
- throws TException, SecurityException, NoSuchMethodException, IllegalArgumentException,
- IllegalAccessException, InvocationTargetException{
-
- UserGroupInformation clientUgi = ugiTrans.getClientUGI();
- if( null != clientUgi){
- throw new TException(new IllegalStateException("UGI is already set. Resetting is not " +
- "allowed. Current ugi is: " + clientUgi.getUserName()));
- }
-
- set_ugi_args args = fn.getEmptyArgsInstance();
- try {
- args.read(iprot);
- } catch (TProtocolException e) {
- iprot.readMessageEnd();
- TApplicationException x = new TApplicationException(TApplicationException.PROTOCOL_ERROR,
- e.getMessage());
- oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
- x.write(oprot);
- oprot.writeMessageEnd();
- oprot.getTransport().flush();
- return;
- }
- iprot.readMessageEnd();
- set_ugi_result result = fn.getResult(iface, args);
- List<String> principals = result.getSuccess();
- // Store the ugi in transport and then continue as usual.
- ugiTrans.setClientUGI(UserGroupInformation.createRemoteUser(principals.remove(principals.size()-1)));
- oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.REPLY, msg.seqid));
- result.write(oprot);
- oprot.writeMessageEnd();
- oprot.getTransport().flush();
- }
-
- @Override
- protected void setIpAddress(final TProtocol in) {
- TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
- Socket socket = ugiTrans.getSocket();
- if (socket != null) {
- setIpAddress(socket);
- }
- }
-}
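The doAs() flow described in the class comment is easy to lose among the Thrift plumbing, so here is the pattern on its own, as a hedged standalone sketch ("alice" is a made-up user name standing in for what a client would send via set_ugi()):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser("alice");
    String ranAs = clientUgi.doAs((PrivilegedExceptionAction<String>) () ->
        // Everything in here runs with alice's identity, e.g. for HDFS permission checks.
        UserGroupInformation.getCurrentUser().getUserName());
    System.out.println("ran as: " + ranAs);   // prints "ran as: alice"
  }
}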
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java b/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
index 3c72c9c..daf67e4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,28 +17,40 @@
*/
package org.apache.hadoop.hive.metastore.repl;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.TimerTask;
import java.util.concurrent.TimeUnit;
-public class DumpDirCleanerTask extends TimerTask {
+public class DumpDirCleanerTask implements MetastoreTaskThread {
public static final Logger LOG = LoggerFactory.getLogger(DumpDirCleanerTask.class);
- private final HiveConf conf;
- private final Path dumpRoot;
- private final long ttl;
+ private Configuration conf;
+ private Path dumpRoot;
+ private long ttl;
- public DumpDirCleanerTask(HiveConf conf) {
+ @Override
+ public void setConf(Configuration conf) {
this.conf = conf;
- dumpRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLDIR));
- ttl = conf.getTimeVar(ConfVars.REPL_DUMPDIR_TTL, TimeUnit.MILLISECONDS);
+ dumpRoot = new Path(HiveConf.getVar(conf, ConfVars.REPLDIR));
+ ttl = HiveConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_TTL, TimeUnit.MILLISECONDS);
+ }
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+ @Override
+ public long runFrequency(TimeUnit unit) {
+ return HiveConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_CLEAN_FREQ, unit);
}
@Override
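Since the class no longer extends TimerTask, scheduling is now the caller's responsibility, keyed off runFrequency(). A hedged sketch of that wiring (the pool setup below is illustrative, not the metastore's actual startup code):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetastoreTaskThread;

public class TaskSchedulingSketch {
  public static void schedule(MetastoreTaskThread task, Configuration conf) {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    task.setConf(conf);                                   // must precede runFrequency()
    long freq = task.runFrequency(TimeUnit.MILLISECONDS);
    pool.scheduleAtFixedRate(task, freq, freq, TimeUnit.MILLISECONDS);
  }
}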
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
deleted file mode 100644
index 64cdfe0..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
-
-/**
- *
- * DummyJdoConnectionUrlHook.
- *
- * An implementation of JDOConnectionURLHook which simply returns CORRECT_URL when
- * getJdoConnectionUrl is called.
- */
-public class DummyJdoConnectionUrlHook implements JDOConnectionURLHook {
-
- public static final String initialUrl = "BAD_URL";
- public static final String newUrl = "CORRECT_URL";
-
- @Override
- public String getJdoConnectionUrl(Configuration conf) throws Exception {
- return newUrl;
- }
-
- @Override
- public void notifyBadConnectionUrl(String url) {
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
deleted file mode 100644
index e8400be..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ /dev/null
@@ -1,1011 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.junit.Assert;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.thrift.TException;
-
-/**
- *
- * DummyRawStoreForJdoConnection.
- *
- * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been
- * applied when this class's setConf method is called, by checking that the value of the
- * METASTORECONNECTURLKEY ConfVar has been updated.
- *
- * All non-void methods return default values.
- */
-public class DummyRawStoreForJdoConnection implements RawStore {
-
- @Override
- public Configuration getConf() {
-
- return null;
- }
-
- @Override
- public void setConf(Configuration arg0) {
- String expected = DummyJdoConnectionUrlHook.newUrl;
- String actual = arg0.get(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname);
-
- Assert.assertEquals("The expected URL used by JDO to connect to the metastore: " + expected +
- " did not match the actual value when the Raw Store was initialized: " + actual,
- expected, actual);
- }
-
- @Override
- public void shutdown() {
-
-
- }
-
- @Override
- public boolean openTransaction() {
-
- return false;
- }
-
- @Override
- public boolean commitTransaction() {
- return false;
- }
-
- @Override
- public boolean isActiveTransaction() {
- return false;
- }
-
- @Override
- public void rollbackTransaction() {
- }
-
- @Override
- public void createDatabase(Database db) throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public Database getDatabase(String name) throws NoSuchObjectException {
-
- return null;
- }
-
- @Override
- public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
-
- return false;
- }
-
- @Override
- public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException,
- MetaException {
-
- return false;
- }
-
- @Override
- public List<String> getDatabases(String pattern) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getAllDatabases() throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public boolean createType(Type type) {
-
- return false;
- }
-
- @Override
- public Type getType(String typeName) {
-
- return null;
- }
-
- @Override
- public boolean dropType(String typeName) {
-
- return false;
- }
-
- @Override
- public void createTable(Table tbl) throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public boolean dropTable(String dbName, String tableName) throws MetaException {
-
- return false;
- }
-
- @Override
- public Table getTable(String dbName, String tableName) throws MetaException {
-
- return null;
- }
-
- @Override
- public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
-
- return false;
- }
-
- @Override
- public Partition getPartition(String dbName, String tableName, List<String> part_vals)
- throws MetaException, NoSuchObjectException {
-
- return null;
- }
-
- @Override
- public boolean dropPartition(String dbName, String tableName, List<String> part_vals)
- throws MetaException {
-
- return false;
- }
-
- @Override
- public List<Partition> getPartitions(String dbName, String tableName, int max)
- throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException,
- MetaException {
-
-
- }
-
- @Override
- public List<String> getTables(String dbName, String pattern) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getTables(String dbName, String pattern, TableType tableType) throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
- throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public List<Table> getTableObjectsByName(String dbname, List<String> tableNames)
- throws MetaException, UnknownDBException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getAllTables(String dbName) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> listTableNamesByFilter(String dbName, String filter, short max_tables)
- throws MetaException, UnknownDBException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts)
- throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
- return null;
- }
-
- @Override
- public List<String> listPartitionNamesByFilter(String db_name, String tbl_name, String filter,
- short max_parts) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public void alterPartition(String db_name, String tbl_name, List<String> part_vals,
- Partition new_part) throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public void alterPartitions(String db_name, String tbl_name, List<List<String>> part_vals_list,
- List<Partition> new_parts) throws InvalidObjectException, MetaException {
-
-
- }
-
-
- @Override
- public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
-
- return false;
- }
-
- @Override
- public Index getIndex(String dbName, String origTableName, String indexName)
- throws MetaException {
-
- return null;
- }
-
- @Override
- public boolean dropIndex(String dbName, String origTableName, String indexName)
- throws MetaException {
-
- return false;
- }
-
- @Override
- public List<Index> getIndexes(String dbName, String origTableName, int max)
- throws MetaException {
-
- return null;
- }
-
- @Override
- public List<String> listIndexNames(String dbName, String origTableName, short max)
- throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
- throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter,
- short maxParts) throws MetaException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<Partition> getPartitionsByNames(String dbName, String tblName,
- List<String> partNames) throws MetaException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
- String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
- return false;
- }
-
- @Override
- public int getNumPartitionsByFilter(String dbName, String tblName, String filter)
- throws MetaException, NoSuchObjectException {
- return -1;
- }
-
- @Override
- public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr)
- throws MetaException, NoSuchObjectException {
- return -1;
- }
-
- @Override
- public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals,
- PartitionEventType evtType) throws MetaException, UnknownTableException,
- InvalidPartitionException, UnknownPartitionException {
-
- return null;
- }
-
- @Override
- public boolean isPartitionMarkedForEvent(String dbName, String tblName,
- Map<String, String> partName, PartitionEventType evtType) throws MetaException,
- UnknownTableException, InvalidPartitionException, UnknownPartitionException {
-
- return false;
- }
-
- @Override
- public boolean addRole(String rowName, String ownerName) throws InvalidObjectException,
- MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
- PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException,
- InvalidObjectException {
-
- return false;
- }
-
- @Override
- public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
- throws MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
- throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
- List<String> groupNames) throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
- String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
- String partition, String userName, List<String> groupNames) throws InvalidObjectException,
- MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
- String partitionName, String columnName, String userName, List<String> groupNames)
- throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
- PrincipalType principalType) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
- PrincipalType principalType, String dbName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName, List<String> partValues,
- String partName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName, String columnName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName, List<String> partVals,
- String partName, String columnName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
- MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
- throws InvalidObjectException, MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public Role getRole(String roleName) throws NoSuchObjectException {
-
- return null;
- }
-
- @Override
- public List<String> listRoleNames() {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<Role> listRoles(String principalName, PrincipalType principalType) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
- PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<RolePrincipalGrant> listRoleMembers(String roleName) {
- return null;
- }
-
- @Override
- public Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals,
- String user_name, List<String> group_names) throws MetaException, NoSuchObjectException,
- InvalidObjectException {
-
- return null;
- }
-
- @Override
- public List<Partition> getPartitionsWithAuth(String dbName, String tblName, short maxParts,
- String userName, List<String> groupNames) throws MetaException, NoSuchObjectException,
- InvalidObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> listPartitionNamesPs(String db_name, String tbl_name, List<String> part_vals,
- short max_parts) throws MetaException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
- List<String> part_vals, short max_parts, String userName, List<String> groupNames)
- throws MetaException, InvalidObjectException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public long cleanupEvents() {
-
- return 0;
- }
-
- @Override
- public boolean addToken(String tokenIdentifier, String delegationToken) {
- return false;
- }
-
- @Override
- public boolean removeToken(String tokenIdentifier) {
- return false;
- }
-
- @Override
- public String getToken(String tokenIdentifier) {
- return null;
- }
-
- @Override
- public List<String> getAllTokenIdentifiers() {
- return Collections.emptyList();
- }
-
- @Override
- public int addMasterKey(String key) {
- return 0;
- }
-
- @Override
- public void updateMasterKey(Integer seqNo, String key) {
- }
-
- @Override
- public boolean removeMasterKey(Integer keySeq) {
- return false;
- }
-
- @Override
- public String[] getMasterKeys() {
- return new String[0];
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listGlobalGrantsAll() {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName, String partitionName, String columnName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName, String partitionName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName, String columnName) {
- return Collections.emptyList();
- }
-
- @Override
- public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
- List<String> colName) throws MetaException, NoSuchObjectException {
- return null;
- }
-
- @Override
- public boolean deleteTableColumnStatistics(String dbName, String tableName,
- String colName)
- throws NoSuchObjectException, MetaException, InvalidObjectException {
- return false;
- }
-
-
- @Override
- public boolean deletePartitionColumnStatistics(String dbName, String tableName,
- String partName, List<String> partVals, String colName)
- throws NoSuchObjectException, MetaException, InvalidObjectException,
- InvalidInputException {
- return false;
-
- }
-
- @Override
- public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
- throws NoSuchObjectException, MetaException, InvalidObjectException {
- return false;
- }
-
- @Override
- public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals)
- throws NoSuchObjectException, MetaException, InvalidObjectException {
- return false;
- }
-
- @Override
- public void verifySchema() throws MetaException {
- }
-
- @Override
- public String getMetaStoreSchemaVersion() throws MetaException {
- return null;
- }
-
- @Override
- public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
- }
-
- @Override
- public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
- String tblName, List<String> colNames, List<String> partNames)
- throws MetaException, NoSuchObjectException {
- return Collections.emptyList();
- }
-
- @Override
- public boolean doesPartitionExist(String dbName, String tableName,
- List<String> partVals) throws MetaException, NoSuchObjectException {
- return false;
- }
-
- @Override
- public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
- throws InvalidObjectException, MetaException {
- return false;
- }
-
- @Override
- public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
- return false;
- }
-
- @Override
- public void dropPartitions(String dbName, String tblName, List<String> partNames) {
- }
-
- @Override
- public void createFunction(Function func) throws InvalidObjectException,
- MetaException {
- }
-
- @Override
- public void alterFunction(String dbName, String funcName, Function newFunction)
- throws InvalidObjectException, MetaException {
- }
-
- @Override
- public void dropFunction(String dbName, String funcName)
- throws MetaException, NoSuchObjectException, InvalidObjectException,
- InvalidInputException {
- }
-
- @Override
- public Function getFunction(String dbName, String funcName)
- throws MetaException {
- return null;
- }
-
- @Override
- public List<Function> getAllFunctions()
- throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getFunctions(String dbName, String pattern)
- throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public AggrStats get_aggr_stats_for(String dbName,
- String tblName, List<String> partNames, List<String> colNames)
- throws MetaException {
- return null;
- }
-
- @Override
- public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
- return null;
- }
-
- @Override
- public void addNotificationEvent(NotificationEvent event) {
-
- }
-
- @Override
- public void cleanNotificationEvents(int olderThan) {
-
- }
-
- @Override
- public CurrentNotificationEventId getCurrentNotificationEventId() {
- return null;
- }
-
- @Override
- public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
- return null;
- }
-
- public void flushCache() {
-
- }
-
- @Override
- public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
- return null;
- }
-
- @Override
- public void putFileMetadata(
- List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
- }
-
- @Override
- public boolean isFileMetadataSupported() {
- return false;
- }
-
- @Override
- public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
- ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
- }
-
- @Override
- public int getTableCount() throws MetaException {
- return 0;
- }
-
- @Override
- public int getPartitionCount() throws MetaException {
- return 0;
- }
-
- @Override
- public int getDatabaseCount() throws MetaException {
- return 0;
- }
-
- @Override
- public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
- return null;
- }
-
- @Override
- public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<SQLForeignKey> getForeignKeys(String parent_db_name,
- String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<SQLUniqueConstraint> getUniqueConstraints(String db_name, String tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<String> createTableWithConstraints(Table tbl,
- List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
- List<SQLUniqueConstraint> uniqueConstraints,
- List<SQLNotNullConstraint> notNullConstraints)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public void dropConstraint(String dbName, String tableName,
- String constraintName) throws NoSuchObjectException {
- // TODO Auto-generated method stub
- }
-
- @Override
- public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<String> addForeignKeys(List<SQLForeignKey> fks)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Map<String, List<ColumnStatisticsObj>> getColStatsForTablePartitions(String dbName,
- String tableName) throws MetaException, NoSuchObjectException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public String getMetastoreDbUuid() throws MetaException {
- throw new MetaException("Get metastore uuid is not implemented");
- }
-
- @Override
- public void createResourcePlan(
- WMResourcePlan resourcePlan, int defaultPoolSize) throws MetaException {
- }
-
- @Override
- public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
- return null;
- }
-
- @Override
- public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
- return null;
- }
-
- @Override
- public WMFullResourcePlan alterResourcePlan(
- String name, WMResourcePlan resourcePlan, boolean canActivateDisabled)
- throws NoSuchObjectException, InvalidOperationException, MetaException {
- return null;
- }
-
- @Override
- public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
- return null;
- }
-
- @Override
- public boolean validateResourcePlan(String name)
- throws NoSuchObjectException, InvalidObjectException, MetaException {
- return false;
- }
-
- @Override
- public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
- }
-
- @Override
- public void createWMTrigger(WMTrigger trigger) throws MetaException {
- }
-
- @Override
- public void alterWMTrigger(WMTrigger trigger)
- throws NoSuchObjectException, InvalidOperationException, MetaException {
- }
-
- @Override
- public void dropWMTrigger(String resourcePlanName, String triggerName)
- throws NoSuchObjectException, MetaException {
- }
-
- @Override
- public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
- throws NoSuchObjectException, MetaException {
- return null;
- }
-}
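As its javadoc explained, this test store's only real check is in setConf(): by the time the RawStore is initialized, the URL hook must already have rewritten the connection URL. A rough sketch of the configuration a test needs to trigger that path (the ConfVars names reflect my reading of HiveConf and should be verified):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.DummyJdoConnectionUrlHook;

public class UrlHookWiringSketch {
  public static HiveConf buildConf() {
    HiveConf conf = new HiveConf();
    // Install the hook and a deliberately bad starting URL; metastore init is
    // expected to ask the hook for a fresh URL and overwrite this key.
    conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLHOOK,
        DummyJdoConnectionUrlHook.class.getName());
    conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
        DummyJdoConnectionUrlHook.initialUrl);
    return conf;
  }
}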
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
index 78ccf9f..28353c3 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
@@ -23,8 +23,11 @@ import java.net.ServerSocket;
import java.net.Socket;
import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
index d3eee85..c880a9a 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
@@ -60,7 +60,7 @@ public class MetaStoreInit {
try {
// We always call init because the hook name in the configuration could
// have changed.
- MetaStoreInit.initConnectionUrlHook(originalConf, updateData);
+ initConnectionUrlHook(originalConf, updateData);
if (updateData.urlHook != null) {
if (badUrl != null) {
updateData.urlHook.notifyBadConnectionUrl(badUrl);
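The retry logic here leans on the hook's two callbacks: hand out a URL, and be told when one went bad. A failover hook is about the smallest non-trivial implementation; the sketch below is illustrative only, with made-up hostnames:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;

public class FailoverUrlHook implements JDOConnectionURLHook {
  private final String[] urls = {
      "jdbc:mysql://db1.example.com/metastore",
      "jdbc:mysql://db2.example.com/metastore"};
  private volatile int current = 0;

  @Override
  public String getJdoConnectionUrl(Configuration conf) {
    return urls[current];
  }

  @Override
  public void notifyBadConnectionUrl(String badUrl) {
    // Rotate to the next candidate only if the reported URL is still current.
    if (urls[current].equals(badUrl)) {
      current = (current + 1) % urls.length;
    }
  }
}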
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetastoreTaskThread.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetastoreTaskThread.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetastoreTaskThread.java
new file mode 100644
index 0000000..e5d21b0
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetastoreTaskThread.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Any task that will run as a separate thread in the metastore should implement this
+ * interface.
+ */
+public interface MetastoreTaskThread extends Configurable, Runnable {
+
+ /**
+ * Get the frequency at which the thread should be scheduled in the thread pool. You must call
+ * {@link #setConf(Configuration)} before calling this method.
+ * @param unit TimeUnit to express the frequency in.
+ * @return frequency
+ */
+ long runFrequency(TimeUnit unit);
+}
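To show the contract from the implementer's side, a minimal sketch (the class name and hard-coded frequency are made up; a real task would read its frequency from configuration, as DumpDirCleanerTask above does):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

/** Illustrative only: a trivial periodic task. */
public class HeartbeatLoggerTask implements MetastoreTaskThread {
  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  @Override
  public long runFrequency(TimeUnit unit) {
    return unit.convert(60, TimeUnit.SECONDS);   // once a minute, hard-coded
  }

  @Override
  public void run() {
    System.out.println("metastore housekeeping tick");
  }
}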
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
index 2671c1f..2fcc162 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
@@ -80,10 +80,18 @@ public class PartFilterExprUtil {
try {
@SuppressWarnings("unchecked")
Class<? extends PartitionExpressionProxy> clazz =
- JavaUtils.getClass(className, PartitionExpressionProxy.class);
+ JavaUtils.getClass(className, PartitionExpressionProxy.class);
return JavaUtils.newInstance(
clazz, new Class<?>[0], new Object[0]);
} catch (MetaException e) {
+ if (e.getMessage().matches(".* class not found")) {
+ // TODO MS-SPLIT For now if we cannot load the default PartitionExpressionForMetastore
+ // class (since it's from ql) load the DefaultPartitionExpressionProxy, which just throws
+ // UnsupportedOperationExceptions. This allows existing Hive instances to work but also
+ // allows us to instantiate the metastore stand alone for testing. Not sure if this is
+ // the best long term solution.
+ return new DefaultPartitionExpressionProxy();
+ }
LOG.error("Error loading PartitionExpressionProxy", e);
throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
}
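The fallback added here is the usual load-or-degrade pattern. Written out with plain reflection so it stands alone (a sketch of the pattern, not the code above; JavaUtils adds error wrapping this version skips):

public class ProxyLoadingSketch {
  static PartitionExpressionProxy loadProxy(String className) {
    try {
      Class<?> clazz = Class.forName(className);
      return (PartitionExpressionProxy) clazz.getDeclaredConstructor().newInstance();
    } catch (ClassNotFoundException e) {
      // ql is not on the classpath: degrade to a proxy that rejects expression
      // pushdown rather than failing metastore startup.
      return new DefaultPartitionExpressionProxy();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage(), e);
    }
  }
}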
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java
deleted file mode 100644
index 9fa5cab..0000000
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RunnableConfigurable.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configurable;
-
-/**
- * Combination of Runnable and Configurable
- */
-public interface RunnableConfigurable extends Configurable, Runnable {
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java
new file mode 100644
index 0000000..6251e23
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.List;
+
+/**
+ * An interface to implement reading schemas from stored data.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+interface StorageSchemaReader {
+ /**
+ * Read the schema from the storage representation of the table.
+ * @param tbl metastore table object
+ * @param envContext environment context
+ * @param conf the current configuration
+ * @return list of field schemas
+ * @throws MetaException if the table storage could not be read
+ */
+ List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext, Configuration conf)
+ throws MetaException;
+}
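Note the interface is package-private, so implementations sit in org.apache.hadoop.hive.metastore. The smallest conceivable reader just returns the columns already recorded in the table's StorageDescriptor; a sketch (illustrative only, not a class this patch adds):

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

public class StoredColumnsSchemaReader implements StorageSchemaReader {
  @Override
  public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext,
      Configuration conf) throws MetaException {
    // No SerDe round-trip: trust whatever the metastore already recorded.
    return tbl.getSd().getCols();
  }
}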
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
new file mode 100644
index 0000000..c0c9604
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.lang.reflect.InvocationTargetException;
+import java.net.Socket;
+
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+
+/**
+ * TSetIpAddressProcessor passes the IP address of the Thrift client to the HMSHandler.
+ */
+public class TSetIpAddressProcessor<I extends Iface> extends ThriftHiveMetastore.Processor<Iface> {
+
+ @SuppressWarnings("unchecked")
+ public TSetIpAddressProcessor(I iface) throws SecurityException, NoSuchFieldException,
+ IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
+ InvocationTargetException {
+ super(iface);
+ }
+
+ @Override
+ public boolean process(final TProtocol in, final TProtocol out) throws TException {
+ setIpAddress(in);
+
+ return super.process(in, out);
+ }
+
+ protected void setIpAddress(final TProtocol in) {
+ TTransport transport = in.getTransport();
+ if (!(transport instanceof TSocket)) {
+ return;
+ }
+ setIpAddress(((TSocket)transport).getSocket());
+ }
+
+ protected void setIpAddress(final Socket inSocket) {
+ HMSHandler.setThreadLocalIpAddress(inSocket.getInetAddress().getHostAddress());
+ }
+}
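For orientation, a bare-bones sketch of how a processor like this plugs into a Thrift server (illustrative only; the real HiveMetaStore startup additionally configures transports, SASL, thread counts, and so on):

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
import org.apache.thrift.TProcessor;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;

public class ProcessorWiringSketch {
  public static TServer buildServer(Iface handler, int port) throws Exception {
    // Each accepted connection is a TSocket, so process() can record the peer IP.
    TProcessor processor = new TSetIpAddressProcessor<>(handler);
    TServerSocket transport = new TServerSocket(port);
    return new TThreadPoolServer(
        new TThreadPoolServer.Args(transport).processor(processor));
  }
}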
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
new file mode 100644
index 0000000..5285b54
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.net.Socket;
+import java.security.PrivilegedExceptionAction;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_args;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_result;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.ProcessFunction;
+import org.apache.thrift.TApplicationException;
+import org.apache.thrift.TBase;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TMessage;
+import org.apache.thrift.protocol.TMessageType;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.protocol.TProtocolUtil;
+import org.apache.thrift.protocol.TType;
+
+/** TUGIBasedProcessor is used in unsecure mode for thrift metastore client-server communication.
+ * This processor checks whether the first rpc call after the connection is set up is set_ugi(),
+ * through which the client sends its ugi to the server. The processor then performs all
+ * subsequent rpcs on the connection using ugi.doAs(), so all actions are performed in the
+ * client user's context.
+ * Note that old clients never call set_ugi(), so no ugi is received on the server side; in that
+ * case the server exhibits its previous behavior and continues as usual.
+ */
+@SuppressWarnings("rawtypes")
+public class TUGIBasedProcessor<I extends Iface> extends TSetIpAddressProcessor<Iface> {
+
+ private final I iface;
+ private final Map<String, org.apache.thrift.ProcessFunction<Iface, ? extends TBase>>
+ functions;
+ static final Logger LOG = LoggerFactory.getLogger(TUGIBasedProcessor.class);
+
+ public TUGIBasedProcessor(I iface) throws SecurityException, NoSuchFieldException,
+ IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
+ InvocationTargetException {
+ super(iface);
+ this.iface = iface;
+ this.functions = getProcessMapView();
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public boolean process(final TProtocol in, final TProtocol out) throws TException {
+ setIpAddress(in);
+
+ final TMessage msg = in.readMessageBegin();
+ final ProcessFunction<Iface, ? extends TBase> fn = functions.get(msg.name);
+ if (fn == null) {
+ TProtocolUtil.skip(in, TType.STRUCT);
+ in.readMessageEnd();
+ TApplicationException x = new TApplicationException(TApplicationException.UNKNOWN_METHOD,
+ "Invalid method name: '"+msg.name+"'");
+ out.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
+ x.write(out);
+ out.writeMessageEnd();
+ out.getTransport().flush();
+ return true;
+ }
+ TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
+ // Store ugi in transport if the rpc is set_ugi
+ if (msg.name.equalsIgnoreCase("set_ugi")){
+ try {
+ handleSetUGI(ugiTrans, (ThriftHiveMetastore.Processor.set_ugi<Iface>)fn, msg, in, out);
+ } catch (TException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new TException(e.getCause());
+ }
+ return true;
+ }
+ UserGroupInformation clientUgi = ugiTrans.getClientUGI();
+ if (null == clientUgi){
+ // At this point the transport should contain the client ugi; if it doesn't, it's an old client.
+ fn.process(msg.seqid, in, out, iface);
+ return true;
+ } else { // Found ugi, perform doAs().
+ PrivilegedExceptionAction<Void> pvea = new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() {
+ try {
+ fn.process(msg.seqid,in, out, iface);
+ return null;
+ } catch (TException te) {
+ throw new RuntimeException(te);
+ }
+ }
+ };
+ try {
+ clientUgi.doAs(pvea);
+ return true;
+ } catch (RuntimeException rte) {
+ if (rte.getCause() instanceof TException) {
+ throw (TException)rte.getCause();
+ }
+ throw rte;
+ } catch (InterruptedException ie) {
+ throw new RuntimeException(ie); // unexpected!
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe); // unexpected!
+ } finally {
+ try {
+ FileSystem.closeAllForUGI(clientUgi);
+ } catch (IOException e) {
+ LOG.error("Could not clean up file-system handles for UGI: " + clientUgi, e);
+ }
+ }
+ }
+ }
+
+ private void handleSetUGI(TUGIContainingTransport ugiTrans,
+ ThriftHiveMetastore.Processor.set_ugi<Iface> fn, TMessage msg, TProtocol iprot, TProtocol oprot)
+ throws TException, SecurityException, NoSuchMethodException, IllegalArgumentException,
+ IllegalAccessException, InvocationTargetException{
+
+ UserGroupInformation clientUgi = ugiTrans.getClientUGI();
+ if( null != clientUgi){
+ throw new TException(new IllegalStateException("UGI is already set. Resetting is not " +
+ "allowed. Current ugi is: " + clientUgi.getUserName()));
+ }
+
+ set_ugi_args args = fn.getEmptyArgsInstance();
+ try {
+ args.read(iprot);
+ } catch (TProtocolException e) {
+ iprot.readMessageEnd();
+ TApplicationException x = new TApplicationException(TApplicationException.PROTOCOL_ERROR,
+ e.getMessage());
+ oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
+ x.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ return;
+ }
+ iprot.readMessageEnd();
+ set_ugi_result result = fn.getResult(iface, args);
+ List<String> principals = result.getSuccess();
+ // Store the ugi in transport and then continue as usual.
+ ugiTrans.setClientUGI(UserGroupInformation.createRemoteUser(principals.remove(principals.size()-1)));
+ oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.REPLY, msg.seqid));
+ result.write(oprot);
+ oprot.writeMessageEnd();
+ oprot.getTransport().flush();
+ }
+
+ @Override
+ protected void setIpAddress(final TProtocol in) {
+ TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
+ Socket socket = ugiTrans.getSocket();
+ if (socket != null) {
+ setIpAddress(socket);
+ }
+ }
+}
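
The doAs() dance above is standard Hadoop security API usage. A self-contained
sketch of the same pattern, with a made-up user name:

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UgiDoAsSketch {
      public static void main(String[] args) throws Exception {
        // Same pattern the processor applies once set_ugi() has delivered the client name.
        UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser("alice");
        clientUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
          // Filesystem and metastore calls here run as "alice", not as the server user.
          System.out.println("Running as: " + UserGroupInformation.getCurrentUser().getUserName());
          return null;
        });
      }
    }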
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index fb904ab..d18ddc8 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -19,7 +19,15 @@ package org.apache.hadoop.hive.metastore.conf;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.DefaultStorageSchemaReader;
+import org.apache.hadoop.hive.metastore.HiveAlterHandler;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService;
+import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
+import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
+import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
import org.apache.hadoop.hive.metastore.utils.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -99,6 +107,7 @@ public class MetastoreConf {
*/
public static final MetastoreConf.ConfVars[] metaVars = {
ConfVars.WAREHOUSE,
+ ConfVars.REPLDIR,
ConfVars.THRIFT_URIS,
ConfVars.SERVER_PORT,
ConfVars.THRIFT_CONNECTION_RETRIES,
@@ -239,7 +248,7 @@ public class MetastoreConf {
"hive.metastore.aggregate.stats.cache.ttl", 600, TimeUnit.SECONDS,
"Number of seconds for a cached node to be active in the cache before they become stale."),
ALTER_HANDLER("metastore.alter.handler", "hive.metastore.alter.impl",
- "org.apache.hadoop.hive.metastore.HiveAlterHandler",
+ HiveAlterHandler.class.getName(),
"Alter handler. For now defaults to the Hive one. Really need a better default option"),
ASYNC_LOG_ENABLED("metastore.async.log.enabled", "hive.async.log.enabled", true,
"Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
@@ -290,6 +299,10 @@ public class MetastoreConf {
CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay",
"hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS,
"Number of seconds for the client to wait between consecutive connection attempts"),
+ CLIENT_KERBEROS_PRINCIPAL("metastore.client.kerberos.principal",
+ "hive.metastore.client.kerberos.principal",
+ "", // E.g. "hive-metastore/_HOST@EXAMPLE.COM".
+ "The Kerberos principal associated with the HA cluster of hcat_servers."),
CLIENT_SOCKET_LIFETIME("metastore.client.socket.lifetime",
"hive.metastore.client.socket.lifetime", 0, TimeUnit.SECONDS,
"MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
@@ -450,6 +463,10 @@ public class MetastoreConf {
"hive.metastore.event.message.factory",
"org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
"Factory class for making encoding and decoding messages in the events generated."),
+ EVENT_DB_NOTIFICATION_API_AUTH("metastore.event.db.notification.api.auth",
+ "hive.metastore.event.db.notification.api.auth", true,
+ "Whether the metastore should authorize access to database notification APIs such as get_next_notification.\n" +
+ "If set to true, only the superusers in proxy settings have the permission"),
EXECUTE_SET_UGI("metastore.execute.setugi", "hive.metastore.execute.setugi", true,
"In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
"the client's reported user and group permissions. Note that this property must be set on \n" +
@@ -587,6 +604,8 @@ public class MetastoreConf {
"Inteval for cmroot cleanup thread."),
REPLCMENABLED("metastore.repl.cm.enabled", "hive.repl.cm.enabled", false,
"Turn on ChangeManager, so delete files will go to cmrootdir."),
+ REPLDIR("metastore.repl.rootdir", "hive.repl.rootdir", "/user/hive/repl/",
+ "HDFS root dir for all replication dumps."),
REPL_COPYFILE_MAXNUMFILES("metastore.repl.copyfile.maxnumfiles",
"hive.exec.copyfile.maxnumfiles", 1L,
"Maximum number of files Hive uses to do sequential HDFS copies between directories." +
@@ -657,6 +676,10 @@ public class MetastoreConf {
"The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
STATS_DEFAULT_PUBLISHER("metastore.stats.default.publisher", "hive.stats.default.publisher", "",
"The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
+ STORAGE_SCHEMA_READER_IMPL("metastore.storage.schema.reader.impl", NO_SUCH_KEY,
+ DefaultStorageSchemaReader.class.getName(),
+ "The class to use to read schemas from storage. It must implement " +
+ "org.apache.hadoop.hive.metastore.StorageSchemaReader"),
STORE_MANAGER_TYPE("datanucleus.storeManagerType", "datanucleus.storeManagerType", "rdbms", "metadata store type"),
SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("metastore.support.special.characters.tablename",
"hive.support.special.characters.tablename", true,
@@ -664,6 +687,19 @@ public class MetastoreConf {
+ "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n"
+ "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+ "The default value is true."),
+ TASK_THREADS_ALWAYS("metastore.task.threads.always", "metastore.task.threads.always",
+ EventCleanerTask.class.getName() + "," + "org.apache.hadoop.hive.metastore.repl.DumpDirCleanerTask",
+ "Comma separated list of tasks that will be started in separate threads. These will " +
+ "always be started, regardless of whether the metastore is running in embedded mode " +
+ "or in server mode. They must implement " + MetastoreTaskThread.class.getName()),
+ TASK_THREADS_REMOTE_ONLY("metastore.task.threads.remote", "metastore.task.threads.remote",
+ AcidHouseKeeperService.class.getName() + "," +
+ AcidOpenTxnsCounterService.class.getName() + "," +
+ AcidCompactionHistoryService.class.getName() + "," +
+ AcidWriteSetService.class.getName(),
+ "Command separated list of tasks that will be started in separate threads. These will be" +
+ " started only when the metastore is running as a separate service. They must " +
+ "implement " + MetastoreTaskThread.class.getName()),
TCP_KEEP_ALIVE("metastore.server.tcp.keepalive",
"hive.metastore.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
@@ -806,7 +842,8 @@ public class MetastoreConf {
// These are all values that we put here just for testing
STR_TEST_ENTRY("test.str", "hive.test.str", "defaultval", "comment"),
STR_SET_ENTRY("test.str.set", NO_SUCH_KEY, "a", new Validator.StringSet("a", "b", "c"), ""),
- STR_LIST_ENTRY("test.str.list", "hive.test.str.list", "a,b,c", "no comment"),
+ STR_LIST_ENTRY("test.str.list", "hive.test.str.list", "a,b,c",
+ "no comment"),
LONG_TEST_ENTRY("test.long", "hive.test.long", 42, "comment"),
DOUBLE_TEST_ENTRY("test.double", "hive.test.double", 3.141592654, "comment"),
TIME_TEST_ENTRY("test.time", "hive.test.time", 1, TimeUnit.SECONDS, "comment"),
@@ -935,6 +972,10 @@ public class MetastoreConf {
return hiveName;
}
+ public Object getDefaultVal() {
+ return defaultVal;
+ }
+
@Override
public String toString() {
return varname;
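
Each ConfVars entry above carries both a metastore.* key and the legacy
hive.* key, and the getters resolve whichever is set. A short sketch of
reading the values touched by this patch, assuming MetastoreConf.newMetastoreConf()
as the entry point:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class ConfVarsSketch {
      public static void main(String[] args) {
        Configuration conf = MetastoreConf.newMetastoreConf();
        String replDir = MetastoreConf.getVar(conf, ConfVars.REPLDIR);
        boolean setUgi = MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI);
        long cleanFreqMs = MetastoreConf.getTimeVar(conf, ConfVars.EVENT_CLEAN_FREQ,
            TimeUnit.MILLISECONDS);
        System.out.println(replDir + " " + setUgi + " " + cleanFreqMs);
      }
    }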
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
index 230c0d3..a6da7df 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
@@ -18,28 +18,42 @@
package org.apache.hadoop.hive.metastore.events;
-import java.util.TimerTask;
-
-import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.metastore.RawStore;
-public class EventCleanerTask extends TimerTask{
+import java.util.concurrent.TimeUnit;
+
+public class EventCleanerTask implements MetastoreTaskThread {
+ private static final Logger LOG = LoggerFactory.getLogger(EventCleanerTask.class);
+
+ private Configuration conf;
+
+ @Override
+ public long runFrequency(TimeUnit unit) {
+ return MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.EVENT_CLEAN_FREQ, unit);
+ }
+
+ @Override
+ public void setConf(Configuration configuration) {
+ conf = configuration;
- public static final Logger LOG = LoggerFactory.getLogger(EventCleanerTask.class);
- private final IHMSHandler handler;
+ }
- public EventCleanerTask(IHMSHandler handler) {
- super();
- this.handler = handler;
+ @Override
+ public Configuration getConf() {
+ return conf;
}
@Override
public void run() {
try {
- RawStore ms = handler.getMS();
+ RawStore ms = HiveMetaStore.HMSHandler.getMSForConf(conf);
long deleteCnt = ms.cleanupEvents();
if (deleteCnt > 0L){
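
The conversion above turns EventCleanerTask from a TimerTask into a
MetastoreTaskThread that owns its Configuration and reports its own run
frequency. A hypothetical task showing the same contract:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetastoreTaskThread;

    public class HeartbeatLogTask implements MetastoreTaskThread {
      private Configuration conf;

      @Override
      public long runFrequency(TimeUnit unit) {
        return unit.convert(60, TimeUnit.SECONDS); // made-up fixed frequency
      }

      @Override
      public void setConf(Configuration configuration) {
        this.conf = configuration;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }

      @Override
      public void run() {
        System.out.println("metastore task heartbeat"); // periodic work goes here
      }
    }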
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
index e031dbb..56eb9ed 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore.events;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
@@ -100,6 +101,15 @@ public abstract class ListenerEvent {
}
/**
+ * You should use {@link #getIHMSHandler()} instead.
+ * @return handler.
+ */
+ @Deprecated
+ public HiveMetaStore.HMSHandler getHandler() {
+ return (HiveMetaStore.HMSHandler)handler;
+ }
+
+ /**
* @return the handler
*/
public IHMSHandler getIHMSHandler() {
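
A short sketch of the preferred accessor from listener code; the describe()
helper is hypothetical:

    import org.apache.hadoop.hive.metastore.IHMSHandler;
    import org.apache.hadoop.hive.metastore.events.ListenerEvent;

    public class HandlerAccessSketch {
      // Prefer the interface-typed accessor; getHandler() remains only for
      // compatibility and casts to the concrete HMSHandler.
      static String describe(ListenerEvent event) {
        IHMSHandler handler = event.getIHMSHandler();
        return handler == null ? "no handler" : handler.getClass().getName();
      }
    }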
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
index 65084bd..eefb505 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
@@ -40,8 +40,7 @@ public interface JDOConnectionURLHook {
* @return the connection URL
* @throws Exception
*/
- public String getJdoConnectionUrl(Configuration conf)
- throws Exception;
+ String getJdoConnectionUrl(Configuration conf) throws Exception;
/**
* Alerts this that the connection URL was bad. Can be used to collect stats,
@@ -49,5 +48,5 @@ public interface JDOConnectionURLHook {
*
* @param url
*/
- public void notifyBadConnectionUrl(String url);
+ void notifyBadConnectionUrl(String url);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidCompactionHistoryService.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidCompactionHistoryService.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidCompactionHistoryService.java
index 97bff0e..e96a7ba 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidCompactionHistoryService.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidCompactionHistoryService.java
@@ -18,14 +18,17 @@
package org.apache.hadoop.hive.metastore.txn;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.TimeUnit;
+
/**
* Purges obsolete items from compaction history data
*/
-public class AcidCompactionHistoryService implements RunnableConfigurable {
+public class AcidCompactionHistoryService implements MetastoreTaskThread {
private static final Logger LOG = LoggerFactory.getLogger(AcidCompactionHistoryService.class);
private Configuration conf;
@@ -43,6 +46,12 @@ public class AcidCompactionHistoryService implements RunnableConfigurable {
}
@Override
+ public long runFrequency(TimeUnit unit) {
+ return MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_REAPER_INTERVAL,
+ unit);
+ }
+
+ @Override
public void run() {
TxnStore.MutexAPI.LockHandle handle = null;
try {
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java
index 7450a2f..c4a488b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java
@@ -18,15 +18,18 @@
package org.apache.hadoop.hive.metastore.txn;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.TimeUnit;
+
/**
* Performs background tasks for Transaction management in Hive.
* Runs inside Hive Metastore Service.
*/
-public class AcidHouseKeeperService implements RunnableConfigurable {
+public class AcidHouseKeeperService implements MetastoreTaskThread {
private static final Logger LOG = LoggerFactory.getLogger(AcidHouseKeeperService.class);
private Configuration conf;
@@ -44,6 +47,11 @@ public class AcidHouseKeeperService implements RunnableConfigurable {
}
@Override
+ public long runFrequency(TimeUnit unit) {
+ return MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TIMEDOUT_TXN_REAPER_INTERVAL, unit);
+ }
+
+ @Override
public void run() {
TxnStore.MutexAPI.LockHandle handle = null;
try {
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
index e3f7eca..2ad5a89 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
@@ -17,15 +17,18 @@
*/
package org.apache.hadoop.hive.metastore.txn;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.TimeUnit;
+
/**
* Background running thread, periodically updating number of open transactions.
* Runs inside Hive Metastore Service.
*/
-public class AcidOpenTxnsCounterService implements RunnableConfigurable {
+public class AcidOpenTxnsCounterService implements MetastoreTaskThread {
private static final Logger LOG = LoggerFactory.getLogger(AcidOpenTxnsCounterService.class);
private Configuration conf;
@@ -34,6 +37,11 @@ public class AcidOpenTxnsCounterService implements RunnableConfigurable {
private TxnStore txnHandler;
@Override
+ public long runFrequency(TimeUnit unit) {
+ return MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.COUNT_OPEN_TXNS_INTERVAL, unit);
+ }
+
+ @Override
public void run() {
try {
long startTime = System.currentTimeMillis();
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidWriteSetService.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidWriteSetService.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidWriteSetService.java
index 413fe96..5ec513d 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidWriteSetService.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidWriteSetService.java
@@ -18,14 +18,17 @@
package org.apache.hadoop.hive.metastore.txn;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.TimeUnit;
+
/**
* Periodically cleans WriteSet tracking information used in Transaction management
*/
-public class AcidWriteSetService implements RunnableConfigurable {
+public class AcidWriteSetService implements MetastoreTaskThread {
private static final Logger LOG = LoggerFactory.getLogger(AcidWriteSetService.class);
private Configuration conf;
@@ -43,6 +46,11 @@ public class AcidWriteSetService implements RunnableConfigurable {
}
@Override
+ public long runFrequency(TimeUnit unit) {
+ return MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.WRITE_SET_REAPER_INTERVAL, unit);
+ }
+
+ @Override
public void run() {
TxnStore.MutexAPI.LockHandle handle = null;
try {
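
All four ACID services above gain the same runFrequency() method, which lets
the metastore ask each task how often it wants to run instead of keeping a
central table of intervals. A sketch of the scheduling pattern this enables
(illustrative only, not the actual HiveMetaStore startup code):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetastoreTaskThread;

    public class TaskSchedulerSketch {
      public static void schedule(MetastoreTaskThread task, Configuration conf) {
        task.setConf(conf);
        long freq = task.runFrequency(TimeUnit.MILLISECONDS);
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        pool.scheduleAtFixedRate(task, freq, freq, TimeUnit.MILLISECONDS);
      }
    }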
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index e45bfb4..06f49de 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -28,8 +28,6 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.RetrySemantics;
import org.apache.hadoop.hive.metastore.DatabaseProduct;
-import org.apache.hadoop.hive.metastore.RunnableConfigurable;
-import org.apache.hadoop.hive.metastore.ThreadPool;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.datasource.BoneCPDataSourceProvider;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
@@ -59,7 +57,6 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java
new file mode 100644
index 0000000..24e4ebe
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import java.util.Properties;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.logging.log4j.Level;
+
+/**
+ * Reusable code for Hive CLIs.
+ * <p>
+ * Basic usage is: create an instance (usually a subclass if you want to add
+ * your own options or processing instructions), parse, and then use
+ * the resulting information.
+ * <p>
+ * See org.apache.hadoop.hive.service.HiveServer or
+ * org.apache.hadoop.hive.metastore.HiveMetaStore
+ * for examples of use.
+ *
+ */
+public class CommonCliOptions {
+ /**
+ * Options for parsing the command line.
+ */
+ protected final Options OPTIONS = new Options();
+
+ protected CommandLine commandLine;
+
+ /**
+ * The name of this cli.
+ */
+ protected final String cliname;
+
+ private boolean verbose = false;
+
+ /**
+ * Create an instance with common options (help, verbose, etc...).
+ *
+ * @param cliname the name of the command
+ * @param includeHiveConf include "hiveconf" as an option if true
+ */
+ @SuppressWarnings("static-access")
+ public CommonCliOptions(String cliname, boolean includeHiveConf) {
+ this.cliname = cliname;
+
+ // [-v|--verbose]
+ OPTIONS.addOption(new Option("v", "verbose", false, "Verbose mode"));
+
+ // [-h|--help]
+ OPTIONS.addOption(new Option("h", "help", false, "Print help information"));
+
+ if (includeHiveConf) {
+ OPTIONS.addOption(OptionBuilder
+ .withValueSeparator()
+ .hasArgs(2)
+ .withArgName("property=value")
+ .withLongOpt("hiveconf")
+ .withDescription("Use value for given property")
+ .create());
+ }
+ }
+
+ /**
+ * Add the hiveconf properties to the Java system properties, overriding
+ * anything already set there.
+ *
+ * @return a copy of the properties specified in hiveconf
+ */
+ public Properties addHiveconfToSystemProperties() {
+ Properties confProps = commandLine.getOptionProperties("hiveconf");
+ for (String propKey : confProps.stringPropertyNames()) {
+ if (verbose) {
+ System.err.println(
+ "hiveconf: " + propKey + "=" + confProps.getProperty(propKey));
+ }
+ if (propKey.equalsIgnoreCase("hive.root.logger")) {
+ splitAndSetLogger(propKey, confProps);
+ } else {
+ System.setProperty(propKey, confProps.getProperty(propKey));
+ }
+ }
+ return confProps;
+ }
+
+ public static void splitAndSetLogger(final String propKey, final Properties confProps) {
+ String propVal = confProps.getProperty(propKey);
+ if (propVal.contains(",")) {
+ String[] tokens = propVal.split(",");
+ for (String token : tokens) {
+ if (Level.getLevel(token) == null) {
+ System.setProperty("hive.root.logger", token);
+ } else {
+ System.setProperty("hive.log.level", token);
+ }
+ }
+ } else {
+ System.setProperty(propKey, confProps.getProperty(propKey));
+ }
+ }
+
+ /**
+ * Print usage information for the CLI.
+ */
+ public void printUsage() {
+ new HelpFormatter().printHelp(cliname, OPTIONS);
+ }
+
+ /**
+ * Parse the arguments.
+ * @param args the command line arguments to parse
+ */
+ public void parse(String[] args) {
+ try {
+ commandLine = new GnuParser().parse(OPTIONS, args);
+
+ if (commandLine.hasOption('h')) {
+ printUsage();
+ System.exit(1);
+ }
+ if (commandLine.hasOption('v')) {
+ verbose = true;
+ }
+ } catch (ParseException e) {
+ System.err.println(e.getMessage());
+ printUsage();
+ System.exit(1);
+ }
+
+ }
+
+ /**
+ * Should the client be verbose.
+ */
+ public boolean isVerbose() {
+ return verbose;
+ }
+
+}
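
A sketch of the subclass-and-parse usage described in the class javadoc;
"mytool" and the port option are hypothetical:

    import org.apache.commons.cli.Option;
    import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;

    public class MyToolCli extends CommonCliOptions {
      public MyToolCli() {
        super("mytool", true); // true: accept --hiveconf property=value
        OPTIONS.addOption(new Option("p", "port", true, "Port to listen on"));
      }

      public static void main(String[] args) {
        MyToolCli cli = new MyToolCli();
        cli.parse(args);
        String port = cli.commandLine.getOptionValue('p', "9083");
        System.out.println("verbose=" + cli.isVerbose() + " port=" + port);
      }
    }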
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
index 0543274..5dcedcd 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.slf4j.Logger;
@@ -37,6 +38,12 @@ import java.util.Collections;
import java.util.List;
public class FileUtils {
+ private static final PathFilter SNAPSHOT_DIR_PATH_FILTER = new PathFilter() {
+ @Override
+ public boolean accept(Path p) {
+ return ".snapshot".equalsIgnoreCase(p.getName());
+ }
+ };
private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class);
public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
@@ -391,4 +398,30 @@ public class FileUtils {
//Once equality has been added in HDFS-9159, we should make use of it
return fs1.getUri().equals(fs2.getUri());
}
+
+ /**
+ * Check if the path contains a subdirectory named '.snapshot'
+ * @param p path to check
+ * @param fs filesystem of the path
+ * @return true if p contains a subdirectory named '.snapshot'
+ * @throws IOException
+ */
+ public static boolean pathHasSnapshotSubDir(Path p, FileSystem fs) throws IOException {
+ // Hadoop is missing a public API to check for snapshottable directories. Check with the directory name
+ // until a more appropriate API is provided by HDFS-12257.
+ final FileStatus[] statuses = fs.listStatus(p, FileUtils.SNAPSHOT_DIR_PATH_FILTER);
+ return statuses != null && statuses.length != 0;
+ }
+
+ public static void makeDir(Path path, Configuration conf) throws MetaException {
+ FileSystem fs;
+ try {
+ fs = path.getFileSystem(conf);
+ if (!fs.exists(path)) {
+ fs.mkdirs(path);
+ }
+ } catch (IOException e) {
+ throw new MetaException("Unable to : " + path);
+ }
+ }
}
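
A sketch of the two new helpers in use; the warehouse path is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.utils.FileUtils;

    public class SnapshotCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path p = new Path("/user/hive/warehouse/t1");
        FileUtils.makeDir(p, conf); // creates the directory if it is missing
        FileSystem fs = p.getFileSystem(conf);
        if (FileUtils.pathHasSnapshotSubDir(p, fs)) {
          System.out.println(p + " contains a .snapshot subdirectory");
        }
      }
    }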
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
index 8d560e7..6a76de5 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
@@ -18,10 +18,20 @@
package org.apache.hadoop.hive.metastore.utils;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -40,6 +50,7 @@ import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
@@ -220,4 +231,166 @@ public class HdfsUtils {
}
return (DistributedFileSystem)fs;
}
+
+ public static class HadoopFileStatus {
+
+ private final FileStatus fileStatus;
+ private final AclStatus aclStatus;
+
+ public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException {
+
+ FileStatus fileStatus = fs.getFileStatus(file);
+ AclStatus aclStatus = null;
+ if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) {
+ // Attempt extended ACL operations only if ACLs are enabled, but don't fail the operation regardless.
+ try {
+ aclStatus = fs.getAclStatus(file);
+ } catch (Exception e) {
+ LOG.info("Skipping ACL inheritance: File system for path " + file + " " +
+ "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
+ LOG.debug("The details are: " + e, e);
+ }
+ }
+ this.fileStatus = fileStatus;
+ this.aclStatus = aclStatus;
+ }
+
+ public FileStatus getFileStatus() {
+ return fileStatus;
+ }
+
+ List<AclEntry> getAclEntries() {
+ return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries());
+ }
+
+ @VisibleForTesting
+ AclStatus getAclStatus() {
+ return this.aclStatus;
+ }
+ }
+
+ /**
+ * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method
+ * will only log a warning if permissions cannot be set, no exception will be thrown.
+ *
+ * @param conf the {@link Configuration} used when setting permissions and ACLs
+ * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from
+ * @param targetGroup the group of the target {@link Path}, if this is set and it is equal to the source group, an
+ * extra set group operation is avoided
+ * @param fs the {@link FileSystem} that contains the target {@link Path}
+ * @param target the {@link Path} to copy permissions, group, and ACLs to
+ * @param recursion recursively set permissions and ACLs on the target {@link Path}
+ */
+ public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+ String targetGroup, FileSystem fs, Path target, boolean recursion) {
+ setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, recursion ? new FsShell() : null);
+ }
+
+ @VisibleForTesting
+ static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+ String targetGroup, FileSystem fs, Path target, boolean recursion, FsShell fsShell) {
+ try {
+ FileStatus fStatus = sourceStatus.getFileStatus();
+ String group = fStatus.getGroup();
+ boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
+ FsPermission sourcePerm = fStatus.getPermission();
+ List<AclEntry> aclEntries = null;
+ if (aclEnabled) {
+ if (sourceStatus.getAclEntries() != null) {
+ LOG.trace(sourceStatus.getAclStatus().toString());
+ aclEntries = new ArrayList<>(sourceStatus.getAclEntries());
+ removeBaseAclEntries(aclEntries);
+
+ // the ACL APIs also expect the traditional user/group/other permissions in the form of ACL entries
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+ }
+ }
+
+ if (recursion) {
+ //use FsShell to change group, permissions, and extended ACL's recursively
+ fsShell.setConf(conf);
+ //If there is no group of a file, no need to call chgrp
+ if (group != null && !group.isEmpty()) {
+ run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
+ }
+ if (aclEnabled) {
+ if (null != aclEntries) {
+ // Attempt extended ACL operations only if ACLs are enabled, but don't fail the operation regardless.
+ try {
+ //construct the -setfacl command
+ String aclEntry = Joiner.on(",").join(aclEntries);
+ run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
+
+ } catch (Exception e) {
+ LOG.info("Skipping ACL inheritance: File system for path " + target + " " +
+ "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
+ LOG.debug("The details are: " + e, e);
+ }
+ }
+ } else {
+ String permission = Integer.toString(sourcePerm.toShort(), 8);
+ run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
+ }
+ } else {
+ if (group != null && !group.isEmpty()) {
+ if (targetGroup == null ||
+ !group.equals(targetGroup)) {
+ fs.setOwner(target, null, group);
+ }
+ }
+ if (aclEnabled) {
+ if (null != aclEntries) {
+ fs.setAcl(target, aclEntries);
+ }
+ } else {
+ fs.setPermission(target, sourcePerm);
+ }
+ }
+ } catch (Exception e) {
+ LOG.warn(
+ "Unable to inherit permissions for file " + target + " from file " + sourceStatus.getFileStatus().getPath(),
+ e.getMessage());
+ LOG.debug("Exception while inheriting permissions", e);
+ }
+ }
+
+ /**
+ * Removes base permission ACLs (unnamed ACLs) from the list of ACL entries.
+ * @param entries ACL entries to remove from
+ */
+ private static void removeBaseAclEntries(List<AclEntry> entries) {
+ Iterables.removeIf(entries, new Predicate<AclEntry>() {
+ @Override
+ public boolean apply(AclEntry input) {
+ return input.getName() == null;
+ }
+ });
+ }
+
+ /**
+ * Create a new AclEntry with scope, type and permission (no name).
+ *
+ * @param scope
+ * AclEntryScope scope of the ACL entry
+ * @param type
+ * AclEntryType ACL entry type
+ * @param permission
+ * FsAction set of permissions in the ACL entry
+ * @return AclEntry new AclEntry
+ */
+ private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type,
+ FsAction permission) {
+ return new AclEntry.Builder().setScope(scope).setType(type)
+ .setPermission(permission).build();
+ }
+
+ private static void run(FsShell shell, String[] command) throws Exception {
+ LOG.debug(ArrayUtils.toString(command));
+ int retval = shell.run(command);
+ LOG.debug("Return value is :" + retval);
+ }
}
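
A sketch of copying permissions, group, and ACLs from one path to another with
the helpers above; both paths are hypothetical, and failures only produce a
warning in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.utils.HdfsUtils;

    public class PermCopySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path source = new Path("/user/hive/warehouse/t1");
        Path target = new Path("/user/hive/warehouse/t1_copy");
        FileSystem fs = source.getFileSystem(conf);
        HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, source);
        // recursion=true routes through FsShell (-chgrp/-setfacl/-chmod -R)
        HdfsUtils.setFullFileStatus(conf, status, null, fs, target, true);
      }
    }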
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java
new file mode 100644
index 0000000..06fe6cb
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.utils;
+
+import java.io.File;
+import java.net.URL;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.apache.logging.log4j.core.impl.Log4jContextFactory;
+import org.apache.logging.log4j.spi.DefaultThreadContextMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utilities common to logging operations.
+ */
+public class LogUtils {
+
+ private static final String HIVE_L4J = "hive-log4j2.properties";
+ private static final Logger l4j = LoggerFactory.getLogger(LogUtils.class);
+
+ @SuppressWarnings("serial")
+ public static class LogInitializationException extends Exception {
+ LogInitializationException(String msg) {
+ super(msg);
+ }
+ }
+
+ /**
+ * Initialize log4j.
+ *
+ * @return a message suitable for display to the user
+ * @throws LogInitializationException if log4j fails to initialize correctly
+ */
+ public static String initHiveLog4j(Configuration conf)
+ throws LogInitializationException {
+ return initHiveLog4jCommon(conf, MetastoreConf.ConfVars.LOG4J_FILE);
+ }
+
+ private static String initHiveLog4jCommon(Configuration conf, ConfVars confVarName)
+ throws LogInitializationException {
+ if (MetastoreConf.getVar(conf, confVarName).equals("")) {
+ // if the log4j configuration file is not set or cannot be found, use the default settings
+ return initHiveLog4jDefault(conf, "", confVarName);
+ } else {
+ // otherwise use the file named by the configuration property
+ String log4jFileName = MetastoreConf.getVar(conf, confVarName);
+ File log4jConfigFile = new File(log4jFileName);
+ boolean fileExists = log4jConfigFile.exists();
+ if (!fileExists) {
+ // if the file named by the property is not found on the local
+ // file system, fall back to the default settings
+ return initHiveLog4jDefault(
+ conf, "Not able to find conf file: " + log4jConfigFile, confVarName);
+ } else {
+ // the property-specified file exists on the local file system;
+ // use that file
+ final boolean async = checkAndSetAsyncLogging(conf);
+ // required for MDC based routing appender so that child threads can inherit the MDC context
+ System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true");
+ Configurator.initialize(null, log4jFileName);
+ logConfigLocation();
+ return "Logging initialized using configuration in " + log4jConfigFile + " Async: " + async;
+ }
+ }
+ }
+
+ private static boolean checkAndSetAsyncLogging(final Configuration conf) {
+ final boolean asyncLogging = MetastoreConf.getBoolVar(conf, ConfVars.ASYNC_LOG_ENABLED);
+ if (asyncLogging) {
+ System.setProperty("Log4jContextSelector",
+ "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector");
+ // default is ClassLoaderContextSelector which is created during automatic logging
+ // initialization in a static initialization block.
+ // Changing ContextSelector at runtime requires creating new context factory which will
+ // internally create new context selector based on system property.
+ LogManager.setFactory(new Log4jContextFactory());
+ }
+ return asyncLogging;
+ }
+
+ private static String initHiveLog4jDefault(Configuration conf, String logMessage, ConfVars confVarName)
+ throws LogInitializationException {
+ URL hive_l4j = null;
+ switch (confVarName) {
+ case LOG4J_FILE:
+ hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J);
+ break;
+ default:
+ break;
+ }
+ if (hive_l4j != null) {
+ final boolean async = checkAndSetAsyncLogging(conf);
+ System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true");
+ Configurator.initialize(null, hive_l4j.toString());
+ logConfigLocation();
+ return (logMessage + "\n" + "Logging initialized using configuration in " + hive_l4j +
+ " Async: " + async);
+ } else {
+ throw new LogInitializationException(
+ logMessage + "Unable to initialize logging using "
+ + LogUtils.HIVE_L4J + ", not found on CLASSPATH!");
+ }
+ }
+
+ private static void logConfigLocation() throws LogInitializationException {
+ // Log a warning if hive-default.xml is found on the classpath
+ if (MetastoreConf.getHiveDefaultLocation() != null) {
+ l4j.warn("DEPRECATED: Ignoring hive-default.xml found on the CLASSPATH at "
+ + MetastoreConf.getHiveDefaultLocation().getPath());
+ }
+ // Look for hive-site.xml on the CLASSPATH and log its location if found.
+ if (MetastoreConf.getHiveSiteLocation() == null) {
+ l4j.warn("hive-site.xml not found on CLASSPATH");
+ } else {
+ l4j.debug("Using hive-site.xml found on CLASSPATH at "
+ + MetastoreConf.getHiveSiteLocation().getPath());
+ }
+ }
+}
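
The expected startup call, assuming MetastoreConf.newMetastoreConf() as the
configuration source; on failure a service would normally print the message
and keep whatever logging is already configured:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.utils.LogUtils;

    public class LogInitSketch {
      public static void main(String[] args) {
        Configuration conf = MetastoreConf.newMetastoreConf();
        try {
          String msg = LogUtils.initHiveLog4j(conf);
          System.err.println(msg); // e.g. "Logging initialized using configuration in ..."
        } catch (LogUtils.LogInitializationException e) {
          System.err.println("Logging init failed: " + e.getMessage());
        }
      }
    }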
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index 1dd3e7e..beee86f 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -22,9 +22,12 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.collections.ListUtils;
-import org.apache.commons.lang.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.ColumnType;
import org.apache.hadoop.hive.metastore.TableType;
@@ -35,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Decimal;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.Partition;
@@ -45,26 +49,38 @@ import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
+import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger;
+import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.MachineList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
+import java.io.File;
+import java.lang.reflect.InvocationTargetException;
import java.math.BigDecimal;
import java.math.BigInteger;
+import java.net.URL;
+import java.net.URLClassLoader;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
@@ -206,12 +222,12 @@ public class MetaStoreUtils {
singleObj.add(obj);
ColumnStatistics singleCS = new ColumnStatistics(css.getStatsDesc(), singleObj);
if (!map.containsKey(obj.getColName())) {
- map.put(obj.getColName(), new ArrayList<ColumnStatistics>());
+ map.put(obj.getColName(), new ArrayList<>());
}
map.get(obj.getColName()).add(singleCS);
}
}
- return MetaStoreUtils.aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner);
+ return aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner);
}
public static List<ColumnStatisticsObj> aggrPartitionStats(
@@ -404,7 +420,7 @@ public class MetaStoreUtils {
* if it doesn't match the pattern.
*/
public static boolean validateName(String name, Configuration conf) {
- Pattern tpat = null;
+ Pattern tpat;
String allowedCharacters = "\\w_";
if (conf != null
&& MetastoreConf.getBoolVar(conf,
@@ -493,7 +509,7 @@ public class MetaStoreUtils {
return false;
}
- if (MetaStoreUtils.isView(tbl)) {
+ if (isView(tbl)) {
return false;
}
@@ -605,7 +621,7 @@ public class MetaStoreUtils {
params == null ||
!containsAllFastStats(params)) {
if (params == null) {
- params = new HashMap<String,String>();
+ params = new HashMap<>();
}
if (!newDir) {
// The table location already exists and may contain data.
@@ -704,7 +720,7 @@ public class MetaStoreUtils {
params == null ||
!containsAllFastStats(params)) {
if (params == null) {
- params = new HashMap<String,String>();
+ params = new HashMap<>();
}
if (!madeDir) {
// The partition location already existed and may contain data. Lets try to
@@ -731,7 +747,7 @@ public class MetaStoreUtils {
return false;
}
- Map<String, String> columnNameTypePairMap = new HashMap<String, String>(newCols.size());
+ Map<String, String> columnNameTypePairMap = new HashMap<>(newCols.size());
for (FieldSchema newCol : newCols) {
columnNameTypePairMap.put(newCol.getName().toLowerCase(), newCol.getType());
}
@@ -750,4 +766,313 @@ public class MetaStoreUtils {
String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
}
+
+ /**
+ * Create listener instances as per the configuration.
+ *
+ * @param clazz class of the listeners to instantiate
+ * @param conf configuration object passed to each listener's constructor
+ * @param listenerImplList comma-separated list of listener implementation class names
+ * @return instances of the listeners
+ * @throws MetaException if there is any failure instantiating a class
+ */
+ public static <T> List<T> getMetaStoreListeners(Class<T> clazz,
+ Configuration conf, String listenerImplList) throws MetaException {
+ List<T> listeners = new ArrayList<>();
+
+ if (StringUtils.isBlank(listenerImplList)) {
+ return listeners;
+ }
+
+ String[] listenerImpls = listenerImplList.split(",");
+ for (String listenerImpl : listenerImpls) {
+ try {
+ T listener = (T) Class.forName(
+ listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor(
+ Configuration.class).newInstance(conf);
+ listeners.add(listener);
+ } catch (InvocationTargetException ie) {
+ throw new MetaException("Failed to instantiate listener named: "+
+ listenerImpl + ", reason: " + ie.getCause());
+ } catch (Exception e) {
+ throw new MetaException("Failed to instantiate listener named: "+
+ listenerImpl + ", reason: " + e);
+ }
+ }
+
+ return listeners;
+ }
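
A sketch of loading listeners through this helper; it assumes
ConfVars.EVENT_LISTENERS holds the comma-separated class names, which is how
the metastore's own listener lists are configured:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    public class ListenerLoadSketch {
      public static void main(String[] args) throws MetaException {
        Configuration conf = MetastoreConf.newMetastoreConf();
        String impls = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.EVENT_LISTENERS);
        List<MetaStoreEventListener> listeners =
            MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf, impls);
        System.out.println("Loaded " + listeners.size() + " listeners");
      }
    }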
+
+ public static String validateSkewedColNames(List<String> cols) {
+ if (CollectionUtils.isEmpty(cols)) {
+ return null;
+ }
+ for (String col : cols) {
+ if (!validateColumnName(col)) {
+ return col;
+ }
+ }
+ return null;
+ }
+
+ public static String validateSkewedColNamesSubsetCol(List<String> skewedColNames,
+ List<FieldSchema> cols) {
+ if (CollectionUtils.isEmpty(skewedColNames)) {
+ return null;
+ }
+ List<String> colNames = new ArrayList<>(cols.size());
+ for (FieldSchema fieldSchema : cols) {
+ colNames.add(fieldSchema.getName());
+ }
+ // make a copy
+ List<String> copySkewedColNames = new ArrayList<>(skewedColNames);
+ // remove valid columns
+ copySkewedColNames.removeAll(colNames);
+ if (copySkewedColNames.isEmpty()) {
+ return null;
+ }
+ return copySkewedColNames.toString();
+ }
+
+ public static boolean isNonNativeTable(Table table) {
+ if (table == null || table.getParameters() == null) {
+ return false;
+ }
+ return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null);
+ }
+
+ public static boolean isIndexTable(Table table) {
+ if (table == null) {
+ return false;
+ }
+ return TableType.INDEX_TABLE.toString().equals(table.getTableType());
+ }
+
+ /**
+ * Given a list of partition columns and a partial mapping from
+ * some partition columns to values, returns the values
+ * for each column.
+ * @param partCols the list of table partition columns
+ * @param partSpec the partial mapping from partition column to values
+ * @return list of values for the given partition columns; any value missing
+ * from partSpec is replaced by an empty string
+ */
+ public static List<String> getPvals(List<FieldSchema> partCols,
+ Map<String, String> partSpec) {
+ List<String> pvals = new ArrayList<>(partCols.size());
+ for (FieldSchema field : partCols) {
+ String val = StringUtils.defaultString(partSpec.get(field.getName()));
+ pvals.add(val);
+ }
+ return pvals;
+ }
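
For instance, with made-up partition columns "ds" and "hr", where only "ds" is specified:

  List<FieldSchema> partCols = Arrays.asList(
      new FieldSchema("ds", "string", null),
      new FieldSchema("hr", "string", null));
  Map<String, String> partSpec = Collections.singletonMap("ds", "2017-11-21");
  // Yields ["2017-11-21", ""]; the unspecified "hr" becomes an empty string.
  List<String> pvals = MetaStoreUtils.getPvals(partCols, partSpec);
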
+
+ /**
+ * Compare two schemas field by field. When comparing fields, the comment
+ * is ignored; only the name and type matter.
+ * @param schema1 the first schema to be compared
+ * @param schema2 the second schema to be compared
+ * @return true if the two schemas are the same, else false
+ */
+ public static boolean compareFieldColumns(List<FieldSchema> schema1, List<FieldSchema> schema2) {
+ if (schema1.size() != schema2.size()) {
+ return false;
+ }
+ Iterator<FieldSchema> its1 = schema1.iterator();
+ Iterator<FieldSchema> its2 = schema2.iterator();
+ while (its1.hasNext()) {
+ FieldSchema f1 = its1.next();
+ FieldSchema f2 = its2.next();
+ // The default equals provided by thrift compares the comments too for
+ // equality, thus we need to compare the relevant fields here.
+ if (!StringUtils.equals(f1.getName(), f2.getName()) ||
+ !StringUtils.equals(f1.getType(), f2.getType())) {
+ return false;
+ }
+ }
+ return true;
+ }
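
A quick sketch: the same column with different comments still compares equal.

  List<FieldSchema> a = Arrays.asList(new FieldSchema("id", "int", "old comment"));
  List<FieldSchema> b = Arrays.asList(new FieldSchema("id", "int", "new comment"));
  boolean same = MetaStoreUtils.compareFieldColumns(a, b); // true
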
+
+ public static boolean isArchived(Partition part) {
+ Map<String, String> params = part.getParameters();
+ return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED));
+ }
+
+ public static Path getOriginalLocation(Partition part) {
+ Map<String, String> params = part.getParameters();
+ assert isArchived(part);
+ String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION);
+ assert originalLocation != null;
+
+ return new Path(originalLocation);
+ }
+
+ private static String ARCHIVING_LEVEL = "archiving_level";
+ public static int getArchivingLevel(Partition part) throws MetaException {
+ if (!isArchived(part)) {
+ throw new MetaException("Getting level of unarchived partition");
+ }
+
+ String lv = part.getParameters().get(ARCHIVING_LEVEL);
+ if (lv != null) {
+ return Integer.parseInt(lv);
+ }
+ // Partitions archived before multi-level archiving was introduced are archived at the full depth of the partition spec.
+ return part.getValues().size();
+ }
+
+ public static boolean partitionNameHasValidCharacters(List<String> partVals,
+ Pattern partitionValidationPattern) {
+ return getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null;
+ }
+
+ public static void getMergableCols(ColumnStatistics csNew, Map<String, String> parameters) {
+ List<ColumnStatisticsObj> list = new ArrayList<>();
+ for (int index = 0; index < csNew.getStatsObj().size(); index++) {
+ ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
+ // canColumnStatsMerge guarantees that it is accurate before we do merge
+ if (StatsSetupConst.canColumnStatsMerge(parameters, statsObjNew.getColName())) {
+ list.add(statsObjNew);
+ }
+ // in all the other cases, we cannot merge
+ }
+ csNew.setStatsObj(list);
+ }
+
+ // this function will merge csOld into csNew.
+ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
+ throws InvalidObjectException {
+ List<ColumnStatisticsObj> list = new ArrayList<>();
+ if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) {
+ // Some of the columns' stats are missing
+ // This implies partition schema has changed. We will merge columns
+ // present in both, overwrite stats for columns absent in metastore and
+ // leave alone columns stats missing from stats task. This last case may
+ // leave stats in stale state. This will be addressed later.
+ LOG.debug("New ColumnStats size is {}, but old ColumnStats size is {}",
+ csNew.getStatsObj().size(), csOld.getStatsObjSize());
+ }
+ // In this case, we have to find out which columns can be merged.
+ Map<String, ColumnStatisticsObj> map = new HashMap<>();
+ // We build a hash map from colName to object for old ColumnStats.
+ for (ColumnStatisticsObj obj : csOld.getStatsObj()) {
+ map.put(obj.getColName(), obj);
+ }
+ for (int index = 0; index < csNew.getStatsObj().size(); index++) {
+ ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
+ ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName());
+ if (statsObjOld != null) {
+ // If statsObjOld is found, we can merge.
+ ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew,
+ statsObjOld);
+ merger.merge(statsObjNew, statsObjOld);
+ }
+ list.add(statsObjNew);
+ }
+ csNew.setStatsObj(list);
+ }
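
A hedged sketch of driving the merge; building fully populated Thrift stats objects is elided here, so fetchNewStats and fetchStoredStats are hypothetical helpers:

  ColumnStatistics newStats = fetchNewStats();    // hypothetical helper
  ColumnStatistics oldStats = fetchStoredStats(); // hypothetical helper
  MetaStoreUtils.mergeColStats(newStats, oldStats);
  // newStats now carries merged values for every column present in both;
  // columns absent from oldStats keep their new values unchanged.
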
+
+ /**
+ * Read and return the metastore SASL configuration. Currently it uses the default
+ * Hadoop SASL configuration, which can be configured via "hadoop.rpc.protection".
+ * Note that HADOOP-10211 made a backward incompatible change, due to which this
+ * call doesn't work with Hadoop 2.4.0 and later.
+ * @param conf the configuration to read the SASL properties from
+ * @param useSSL whether SSL is enabled; if so, SASL encryption is disabled since SSL already provides it
+ * @return the SASL configuration
+ */
+ public static Map<String, String> getMetaStoreSaslProperties(Configuration conf, boolean useSSL) {
+ // As of now Hive Meta Store uses the same configuration as Hadoop SASL configuration
+
+ // If SSL is enabled, override the given value of "hadoop.rpc.protection" and set it to "authentication"
+ // This disables any encryption provided by SASL, since SSL already provides it
+ String hadoopRpcProtectionVal = conf.get(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION);
+ String hadoopRpcProtectionAuth = SaslRpcServer.QualityOfProtection.AUTHENTICATION.toString();
+
+ if (useSSL && hadoopRpcProtectionVal != null && !hadoopRpcProtectionVal.equals(hadoopRpcProtectionAuth)) {
+ LOG.warn("Overriding value of " + CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION + " setting it from "
+ + hadoopRpcProtectionVal + " to " + hadoopRpcProtectionAuth + " because SSL is enabled");
+ conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, hadoopRpcProtectionAuth);
+ }
+ return HadoopThriftAuthBridge.getBridge().getHadoopSaslProperties(conf);
+ }
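
A sketch of the downgrade behavior, assuming SSL is enabled on the metastore:

  Configuration conf = MetastoreConf.newMetastoreConf();
  conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, "privacy");
  Map<String, String> saslProps = MetaStoreUtils.getMetaStoreSaslProperties(conf, true);
  // conf now reports "authentication" for hadoop.rpc.protection, leaving
  // encryption to the TLS layer instead of SASL.
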
+
+ /**
+ * Add new elements to the classpath.
+ *
+ * @param cloader the current class loader, which must be a URLClassLoader
+ * @param newPaths array of classpath elements
+ * @return a new class loader that also resolves the added paths
+ */
+ public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception {
+ URLClassLoader loader = (URLClassLoader) cloader;
+ // start from a mutable copy of the current classpath components
+ List<URL> curPath = new ArrayList<>(Arrays.asList(loader.getURLs()));
+
+ for (String onestr : newPaths) {
+ URL oneurl = urlFromPathString(onestr);
+ if (oneurl != null && !curPath.contains(oneurl)) {
+ curPath.add(oneurl);
+ }
+ }
+
+ return new URLClassLoader(curPath.toArray(new URL[0]), loader);
+ }
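
A usage sketch with a made-up jar path; note that the cast above assumes the incoming loader is a URLClassLoader, which holds on Java 8 era runtimes:

  ClassLoader extended = MetaStoreUtils.addToClassPath(
      Thread.currentThread().getContextClassLoader(),
      new String[] {"/tmp/extra-udfs.jar"}); // hypothetical jar
  Class<?> loaded = Class.forName("com.example.MyUdf", true, extended); // hypothetical class
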
+
+ /**
+ * Create a URL from a string representing a path to a local file.
+ * The path string can be just a path, or can start with file:/ or file:///.
+ * @param onestr path string
+ * @return the URL, or null if the path cannot be parsed
+ */
+ private static URL urlFromPathString(String onestr) {
+ URL oneurl = null;
+ try {
+ if (onestr.startsWith("file:/")) {
+ oneurl = new URL(onestr);
+ } else {
+ oneurl = new File(onestr).toURI().toURL();
+ }
+ } catch (Exception err) {
+ LOG.error("Bad URL " + onestr + ", ignoring path", err);
+ }
+ return oneurl;
+ }
+
+ /**
+ * Verify whether the user is allowed to make DB notification related calls.
+ * Only the superusers defined in the Hadoop proxy user settings have the permission.
+ *
+ * @param user the short user name
+ * @param conf configuration that contains the proxy user settings
+ * @param ipAddress the IP address the user is connecting from
+ * @return true if the user has the permission
+ */
+ public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) {
+ DefaultImpersonationProvider sip = ProxyUsers.getDefaultImpersonationProvider();
+ // Just need to initialize the ProxyUsers for the first time, given that the conf will not change on the fly
+ if (sip == null) {
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+ sip = ProxyUsers.getDefaultImpersonationProvider();
+ }
+ Map<String, Collection<String>> proxyHosts = sip.getProxyHosts();
+ Collection<String> hostEntries = proxyHosts.get(sip.getProxySuperuserIpConfKey(user));
+ MachineList machineList = new MachineList(hostEntries);
+ ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress;
+ return machineList.includes(ipAddress);
+ }
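
A sketch using the standard Hadoop proxyuser keys; the user name and host are made up:

  // hadoop.proxyuser.<user>.hosts restricts which hosts the superuser may
  // connect from; here only 10.0.0.5 is allowed.
  Configuration conf = new Configuration();
  conf.set("hadoop.proxyuser.hive.hosts", "10.0.0.5");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  boolean allowed = MetaStoreUtils.checkUserHasHostProxyPrivileges("hive", conf, "10.0.0.5");
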
+
+ // TODO This should be moved to MetaStoreTestUtils once it is moved into standalone-metastore.
+ /**
+ * Set up a configuration object for standalone mode. A few config variables have
+ * defaults that require parts of Hive that aren't present in standalone mode. This method
+ * sets them to something that will work without the rest of Hive.
+ * @param conf Configuration object
+ */
+ public static void setConfForStandloneMode(Configuration conf) {
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS,
+ EventCleanerTask.class.getName());
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/8fcc7f32/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
index b05c995..41a18cb 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
@@ -34,14 +34,28 @@ import org.apache.hadoop.security.token.TokenSelector;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import javax.security.auth.login.AppConfigurationEntry;
+import org.apache.thrift.transport.TSSLTransportFactory;
+import org.apache.thrift.transport.TServerSocket;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.ssl.SSLServerSocket;
import javax.security.auth.login.LoginException;
import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
public class SecurityUtils {
+ private static final Logger LOG = LoggerFactory.getLogger(SecurityUtils.class);
+
public static UserGroupInformation getUGI() throws LoginException, IOException {
String doAs = System.getenv("HADOOP_USER_NAME");
if (doAs != null && doAs.length() > 0) {
@@ -209,4 +223,65 @@ public class SecurityUtils {
return tokenStoreClass;
}
}
+
+
+ /**
+ * @return the user name set in the hadoop.job.ugi param, or the current user from the system
+ * @throws IOException if the underlying Hadoop call throws LoginException
+ */
+ public static String getUser() throws IOException {
+ try {
+ UserGroupInformation ugi = getUGI();
+ return ugi.getUserName();
+ } catch (LoginException le) {
+ throw new IOException(le);
+ }
+ }
+
+ public static TServerSocket getServerSocket(String hiveHost, int portNum) throws TTransportException {
+ InetSocketAddress serverAddress;
+ if (hiveHost == null || hiveHost.isEmpty()) {
+ // Wildcard bind
+ serverAddress = new InetSocketAddress(portNum);
+ } else {
+ serverAddress = new InetSocketAddress(hiveHost, portNum);
+ }
+ return new TServerSocket(serverAddress);
+ }
+
+ public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, String keyStorePath,
+ String keyStorePassWord, List<String> sslVersionBlacklist) throws TTransportException,
+ UnknownHostException {
+ TSSLTransportFactory.TSSLTransportParameters params =
+ new TSSLTransportFactory.TSSLTransportParameters();
+ params.setKeyStore(keyStorePath, keyStorePassWord);
+ InetSocketAddress serverAddress;
+ if (hiveHost == null || hiveHost.isEmpty()) {
+ // Wildcard bind
+ serverAddress = new InetSocketAddress(portNum);
+ } else {
+ serverAddress = new InetSocketAddress(hiveHost, portNum);
+ }
+ TServerSocket thriftServerSocket =
+ TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress.getAddress(), params);
+ if (thriftServerSocket.getServerSocket() instanceof SSLServerSocket) {
+ List<String> sslVersionBlacklistLocal = new ArrayList<>();
+ for (String sslVersion : sslVersionBlacklist) {
+ sslVersionBlacklistLocal.add(sslVersion.trim().toLowerCase());
+ }
+ SSLServerSocket sslServerSocket = (SSLServerSocket) thriftServerSocket.getServerSocket();
+ List<String> enabledProtocols = new ArrayList<>();
+ for (String protocol : sslServerSocket.getEnabledProtocols()) {
+ if (sslVersionBlacklistLocal.contains(protocol.toLowerCase())) {
+ LOG.debug("Disabling SSL Protocol: " + protocol);
+ } else {
+ enabledProtocols.add(protocol);
+ }
+ }
+ sslServerSocket.setEnabledProtocols(enabledProtocols.toArray(new String[0]));
+ LOG.info("SSL Server Socket Enabled Protocols: "
+ + Arrays.toString(sslServerSocket.getEnabledProtocols()));
+ }
+ return thriftServerSocket;
+ }
}
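
A usage sketch for the SSL socket factory above, with made-up keystore values; 9083 is the conventional metastore port:

  // Bind on all interfaces and disable the legacy SSLv2/SSLv3 protocols.
  List<String> blacklist = Arrays.asList("SSLv2", "SSLv3");
  TServerSocket serverSocket = SecurityUtils.getServerSSLSocket(
      null, 9083, "/etc/hive/keystore.jks", "changeit", blacklist); // hypothetical keystore
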