Posted to commits@hive.apache.org by ga...@apache.org on 2017/11/03 22:49:49 UTC
[5/7] hive git commit: HIVE-17967 Move HiveMetaStore class
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index a491789..939ae21 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -18,15 +18,12 @@
package org.apache.hadoop.hive.metastore;
-import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.net.URL;
-import java.net.URLClassLoader;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -38,13 +35,8 @@ import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -54,28 +46,18 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger;
-import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -83,16 +65,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.MachineList;
-import org.apache.hive.common.util.HiveStringUtils;
import org.apache.hive.common.util.ReflectionUtil;
-import javax.annotation.Nullable;
-
public class MetaStoreUtils {
private static final Logger LOG = LoggerFactory.getLogger("hive.log");
@@ -105,241 +80,20 @@ public class MetaStoreUtils {
// HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well.
public static final char[] specialCharactersInTableNames = new char[] { '/' };
- public static Table createColumnsetSchema(String name, List<String> columns,
- List<String> partCols, Configuration conf) throws MetaException {
-
- if (columns == null) {
- throw new MetaException("columns not specified for table " + name);
- }
-
- Table tTable = new Table();
- tTable.setTableName(name);
- tTable.setSd(new StorageDescriptor());
- StorageDescriptor sd = tTable.getSd();
- sd.setSerdeInfo(new SerDeInfo());
- SerDeInfo serdeInfo = sd.getSerdeInfo();
- serdeInfo.setSerializationLib(LazySimpleSerDe.class.getName());
- serdeInfo.setParameters(new HashMap<String, String>());
- serdeInfo.getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
- Warehouse.DEFAULT_SERIALIZATION_FORMAT);
-
- List<FieldSchema> fields = new ArrayList<FieldSchema>(columns.size());
- sd.setCols(fields);
- for (String col : columns) {
- FieldSchema field = new FieldSchema(col,
- org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME, "'default'");
- fields.add(field);
- }
-
- tTable.setPartitionKeys(new ArrayList<FieldSchema>());
- for (String partCol : partCols) {
- FieldSchema part = new FieldSchema();
- part.setName(partCol);
- part.setType(org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME); // default
- // partition
- // key
- tTable.getPartitionKeys().add(part);
- }
- sd.setNumBuckets(-1);
- return tTable;
- }
-
- /**
- * recursiveDelete
- *
- * just recursively deletes a dir - you'd think Java would have something to
- * do this??
- *
- * @param f
- * - the file/dir to delete
- * @exception IOException
- * propagate f.delete() exceptions
- *
- */
- static public void recursiveDelete(File f) throws IOException {
- if (f.isDirectory()) {
- File fs[] = f.listFiles();
- for (File subf : fs) {
- recursiveDelete(subf);
- }
- }
- if (!f.delete()) {
- throw new IOException("could not delete: " + f.getPath());
- }
- }
-
- /**
- * @param partParams
- * @return True if the passed Parameters Map contains values for all "Fast Stats".
- */
- private static boolean containsAllFastStats(Map<String, String> partParams) {
- for (String stat : StatsSetupConst.fastStats) {
- if (!partParams.containsKey(stat)) {
- return false;
- }
- }
- return true;
- }
-
- static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
- boolean madeDir, EnvironmentContext environmentContext) throws MetaException {
- return updateTableStatsFast(db, tbl, wh, madeDir, false, environmentContext);
- }
-
- private static boolean updateTableStatsFast(Database db, Table tbl, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- if (tbl.getPartitionKeysSize() == 0) {
- // Update stats only when unpartitioned
- FileStatus[] fileStatuses = wh.getFileStatusesForUnpartitionedTable(db, tbl);
- return updateTableStatsFast(tbl, fileStatuses, madeDir, forceRecompute, environmentContext);
- } else {
- return false;
- }
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Table by querying
- * the warehouse if the passed Table does not already have values for these parameters.
- * @param tbl
- * @param fileStatus
- * @param newDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Table already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
- boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
-
- Map<String,String> params = tbl.getParameters();
-
- if ((params!=null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)){
- boolean doNotUpdateStats = Boolean.valueOf(params.get(StatsSetupConst.DO_NOT_UPDATE_STATS));
- params.remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
- tbl.setParameters(params); // to make sure we remove this marker property
- if (doNotUpdateStats){
- return false;
- }
- }
-
- boolean updated = false;
- if (forceRecompute ||
- params == null ||
- !containsAllFastStats(params)) {
- if (params == null) {
- params = new HashMap<String,String>();
- }
- if (!newDir) {
- // The table location already exists and may contain data.
- // Let's try to populate those stats that don't require full scan.
- LOG.info("Updating table stats fast for " + tbl.getTableName());
- populateQuickStats(fileStatus, params);
- LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE));
- if (environmentContext != null
- && environmentContext.isSetProperties()
- && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
- StatsSetupConst.STATS_GENERATED))) {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
- } else {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
- }
- }
- tbl.setParameters(params);
- updated = true;
- }
- return updated;
- }
-
public static void populateQuickStats(FileStatus[] fileStatus, Map<String, String> params) {
- int numFiles = 0;
- long tableSize = 0L;
- String s = "LOG14535 Populating quick stats for: ";
- for (FileStatus status : fileStatus) {
- s += status.getPath() + ", ";
- // don't take directories into account for quick stats
- if (!status.isDir()) {
- tableSize += status.getLen();
- numFiles += 1;
- }
- }
- LOG.info(s/*, new Exception()*/);
- params.put(StatsSetupConst.NUM_FILES, Integer.toString(numFiles));
- params.put(StatsSetupConst.TOTAL_SIZE, Long.toString(tableSize));
+ org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.populateQuickStats(fileStatus, params);
}
- static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext)
- throws MetaException {
- return updatePartitionStatsFast(part, wh, false, false, environmentContext);
+ public static boolean updateTableStatsFast(Table tbl, FileStatus[] fileStatus, boolean newDir,
+ boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
+ return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updateTableStatsFast(
+ tbl, fileStatus, newDir, forceRecompute, environmentContext);
}
- static boolean updatePartitionStatsFast(Partition part, Warehouse wh, boolean madeDir, EnvironmentContext environmentContext)
+ public static boolean updatePartitionStatsFast(Partition part, Warehouse wh, EnvironmentContext environmentContext)
throws MetaException {
- return updatePartitionStatsFast(part, wh, madeDir, false, environmentContext);
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Partition by querying
- * the warehouse if the passed Partition does not already have values for these parameters.
- * @param part
- * @param wh
- * @param madeDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Partition already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- private static boolean updatePartitionStatsFast(Partition part, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- return updatePartitionStatsFast(new PartitionSpecProxy.SimplePartitionWrapperIterator(part),
- wh, madeDir, forceRecompute, environmentContext);
- }
-
- /**
- * Updates the numFiles and totalSize parameters for the passed Partition by querying
- * the warehouse if the passed Partition does not already have values for these parameters.
- * @param part
- * @param wh
- * @param madeDir if true, the directory was just created and can be assumed to be empty
- * @param forceRecompute Recompute stats even if the passed Partition already has
- * these parameters set
- * @return true if the stats were updated, false otherwise
- */
- static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionIterator part, Warehouse wh,
- boolean madeDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException {
- Map<String,String> params = part.getParameters();
- boolean updated = false;
- if (forceRecompute ||
- params == null ||
- !containsAllFastStats(params)) {
- if (params == null) {
- params = new HashMap<String,String>();
- }
- if (!madeDir) {
- // The partition location already existed and may contain data. Let's try to
- // populate those statistics that don't require a full scan of the data.
- LOG.warn("Updating partition stats fast for: " + part.getTableName());
- FileStatus[] fileStatus = wh.getFileStatusesForLocation(part.getLocation());
- populateQuickStats(fileStatus, params);
- LOG.warn("Updated size to " + params.get(StatsSetupConst.TOTAL_SIZE));
- updateBasicState(environmentContext, params);
- }
- part.setParameters(params);
- updated = true;
- }
- return updated;
- }
-
- private static void updateBasicState(EnvironmentContext environmentContext, Map<String,String>
- params) {
- if (params == null) {
- return;
- }
- if (environmentContext != null
- && environmentContext.isSetProperties()
- && StatsSetupConst.TASK.equals(environmentContext.getProperties().get(
- StatsSetupConst.STATS_GENERATED))) {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
- } else {
- StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
- }
+ return org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.updatePartitionStatsFast(
+ part, wh, environmentContext);
}
/**
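
[Editorial note: the hunk above shows the whole shape of HIVE-17967. The legacy class keeps its public entry points, but each body collapses to a one-line delegation into the relocated org.apache.hadoop.hive.metastore.utils.MetaStoreUtils. The target is always spelled with its fully qualified name, since the old and new classes share the simple name MetaStoreUtils and an unqualified call would resolve back to the shim itself. A minimal, self-contained sketch of the pattern, with hypothetical class names that stand in for the real ones:

  // stands in for the relocated org.apache.hadoop.hive.metastore.utils class
  class RelocatedUtils {
    static String getListType(String t) { return "array<" + t + ">"; }
  }

  // stands in for the legacy shim: same public signature as before,
  // body is a pure delegation (fully qualified in the real patch)
  class LegacyUtils {
    static String getListType(String t) {
      return RelocatedUtils.getListType(t);
    }
  }
]
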
@@ -436,53 +190,6 @@ public class MetaStoreUtils {
}
}
- static public void deleteWHDirectory(Path path, Configuration conf,
- boolean use_trash) throws MetaException {
-
- try {
- if (!path.getFileSystem(conf).exists(path)) {
- LOG.warn("drop data called on table/partition with no directory: "
- + path);
- return;
- }
-
- if (use_trash) {
-
- int count = 0;
- Path newPath = new Path("/Trash/Current"
- + path.getParent().toUri().getPath());
-
- if (path.getFileSystem(conf).exists(newPath) == false) {
- path.getFileSystem(conf).mkdirs(newPath);
- }
-
- do {
- newPath = new Path("/Trash/Current" + path.toUri().getPath() + "."
- + count);
- if (path.getFileSystem(conf).exists(newPath)) {
- count++;
- continue;
- }
- if (path.getFileSystem(conf).rename(path, newPath)) {
- break;
- }
- } while (++count < 50);
- if (count >= 50) {
- throw new MetaException("Rename failed due to maxing out retries");
- }
- } else {
- // directly delete it
- path.getFileSystem(conf).delete(path, true);
- }
- } catch (IOException e) {
- LOG.error("Got exception trying to delete data dir: " + e);
- throw new MetaException(e.getMessage());
- } catch (MetaException e) {
- LOG.error("Got exception trying to delete data dir: " + e);
- throw e;
- }
- }
-
/**
* Given a list of partition columns and a partial mapping from
* some partition columns to values the function returns the values
@@ -537,118 +244,12 @@ public class MetaStoreUtils {
return true;
}
- static public String validateTblColumns(List<FieldSchema> cols) {
- for (FieldSchema fieldSchema : cols) {
- if (!validateColumnName(fieldSchema.getName())) {
- return "name: " + fieldSchema.getName();
- }
- String typeError = validateColumnType(fieldSchema.getType());
- if (typeError != null) {
- return typeError;
- }
- }
- return null;
- }
-
- /**
- * @return true if oldType and newType are compatible.
- * Two types are compatible if we have internal functions to cast one to another.
- */
- static private boolean areColTypesCompatible(String oldType, String newType) {
-
- /*
- * RCFile default serde (ColumnarSerde) serializes the values in such a way that the
- * datatypes can be converted from string to any type. The map is also serialized as
- * a string, which can be read as a string as well. However, with any binary
- * serialization, this is not true.
- *
- * Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
- * not blocked.
- */
-
- return TypeInfoUtils.implicitConvertible(TypeInfoUtils.getTypeInfoFromTypeString(oldType),
- TypeInfoUtils.getTypeInfoFromTypeString(newType));
- }
-
public static final String TYPE_FROM_DESERIALIZER = "<derived from deserializer>";
- /**
- * Validate a column type.
- *
- * If the type is predefined, it is valid; otherwise it is not.
- * @param type
- * @return
- */
- static public String validateColumnType(String type) {
- if (type.equals(TYPE_FROM_DESERIALIZER)) {
- return null;
- }
- int last = 0;
- boolean lastAlphaDigit = isValidTypeChar(type.charAt(last));
- for (int i = 1; i <= type.length(); i++) {
- if (i == type.length()
- || isValidTypeChar(type.charAt(i)) != lastAlphaDigit) {
- String token = type.substring(last, i);
- last = i;
- if (!hiveThriftTypeMap.contains(token)) {
- return "type: " + type;
- }
- break;
- }
- }
- return null;
- }
-
- private static boolean isValidTypeChar(char c) {
- return Character.isLetterOrDigit(c) || c == '_';
- }
-
- public static String validateSkewedColNames(List<String> cols) {
- if (CollectionUtils.isEmpty(cols)) {
- return null;
- }
- for (String col : cols) {
- if (!validateColumnName(col)) {
- return col;
- }
- }
- return null;
- }
-
- public static String validateSkewedColNamesSubsetCol(List<String> skewedColNames,
- List<FieldSchema> cols) {
- if (CollectionUtils.isEmpty(skewedColNames)) {
- return null;
- }
- List<String> colNames = new ArrayList<String>(cols.size());
- for (FieldSchema fieldSchema : cols) {
- colNames.add(fieldSchema.getName());
- }
- // make a copy
- List<String> copySkewedColNames = new ArrayList<String>(skewedColNames);
- // remove valid columns
- copySkewedColNames.removeAll(colNames);
- if (copySkewedColNames.isEmpty()) {
- return null;
- }
- return copySkewedColNames.toString();
- }
public static String getListType(String t) {
return "array<" + t + ">";
}
- public static String getMapType(String k, String v) {
- return "map<" + k + "," + v + ">";
- }
-
- public static void setSerdeParam(SerDeInfo sdi, Properties schema,
- String param) {
- String val = schema.getProperty(param);
- if (org.apache.commons.lang.StringUtils.isNotBlank(val)) {
- sdi.getParameters().put(param, val);
- }
- }
-
static HashMap<String, String> typeToThriftTypeMap;
static {
typeToThriftTypeMap = new HashMap<String, String>();
@@ -726,42 +327,6 @@ public class MetaStoreUtils {
}
/**
- * Convert FieldSchemas to Thrift DDL + column names and column types
- *
- * @param structName
- * The name of the table
- * @param fieldSchemas
- * List of fields along with their schemas
- * @return String containing "Thrift
- * DDL#comma-separated-column-names#colon-separated-columntypes
- * Example:
- * "struct result { a string, map<int,string> b}#a,b#string:map<int,string>"
- */
- public static String getFullDDLFromFieldSchema(String structName,
- List<FieldSchema> fieldSchemas) {
- StringBuilder ddl = new StringBuilder();
- ddl.append(getDDLFromFieldSchema(structName, fieldSchemas));
- ddl.append('#');
- StringBuilder colnames = new StringBuilder();
- StringBuilder coltypes = new StringBuilder();
- boolean first = true;
- for (FieldSchema col : fieldSchemas) {
- if (first) {
- first = false;
- } else {
- colnames.append(',');
- coltypes.append(':');
- }
- colnames.append(col.getName());
- coltypes.append(col.getType());
- }
- ddl.append(colnames);
- ddl.append('#');
- ddl.append(coltypes);
- return ddl.toString();
- }
-
- /**
* Convert FieldSchemas to Thrift DDL.
*/
public static String getDDLFromFieldSchema(String structName,
@@ -1107,15 +672,131 @@ public class MetaStoreUtils {
return sb.toString();
}
- public static void makeDir(Path path, HiveConf hiveConf) throws MetaException {
- FileSystem fs;
+ public static int startMetaStore() throws Exception {
+ return startMetaStore(HadoopThriftAuthBridge.getBridge(), null);
+ }
+
+ public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception {
+ int port = findFreePort();
+ startMetaStore(port, bridge, conf);
+ return port;
+ }
+
+ public static int startMetaStore(HiveConf conf) throws Exception {
+ return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
+ }
+
+ public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {
+ startMetaStore(port, bridge, null);
+ }
+
+ public static void startMetaStore(final int port,
+ final HadoopThriftAuthBridge bridge, HiveConf hiveConf)
+ throws Exception{
+ if (hiveConf == null) {
+ hiveConf = new HiveConf(HMSHandler.class);
+ }
+ final HiveConf finalHiveConf = hiveConf;
+ Thread thread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ HiveMetaStore.startMetaStore(port, bridge, finalHiveConf);
+ } catch (Throwable e) {
+ LOG.error("Metastore Thrift Server threw an exception...",e);
+ }
+ }
+ });
+ thread.setDaemon(true);
+ thread.start();
+ loopUntilHMSReady(port);
+ }
+
+ /**
+ * A simple connect test to make sure that the metastore is up
+ * @throws Exception
+ */
+ private static void loopUntilHMSReady(int port) throws Exception {
+ int retries = 0;
+ Exception exc = null;
+ while (true) {
+ try {
+ Socket socket = new Socket();
+ socket.connect(new InetSocketAddress(port), 5000);
+ socket.close();
+ return;
+ } catch (Exception e) {
+ if (retries++ > 60) { //give up
+ exc = e;
+ break;
+ }
+ Thread.sleep(1000);
+ }
+ }
+ // something is preventing metastore from starting
+ // print the stack from all threads for debugging purposes
+ LOG.error("Unable to connect to metastore server: " + exc.getMessage());
+ LOG.info("Printing all thread stack traces for debugging before throwing exception.");
+ LOG.info(getAllThreadStacksAsString());
+ throw exc;
+ }
+
+ private static String getAllThreadStacksAsString() {
+ Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
+ StringBuilder sb = new StringBuilder();
+ for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
+ Thread t = entry.getKey();
+ sb.append(System.lineSeparator());
+ sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState());
+ addStackString(entry.getValue(), sb);
+ }
+ return sb.toString();
+ }
+
+ private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) {
+ sb.append(System.lineSeparator());
+ for (StackTraceElement stackElem : stackElems) {
+ sb.append(stackElem).append(System.lineSeparator());
+ }
+ }
+
+ /**
+ * Finds a free port on the machine.
+ *
+ * @return
+ * @throws IOException
+ */
+ public static int findFreePort() throws IOException {
+ ServerSocket socket= new ServerSocket(0);
+ int port = socket.getLocalPort();
+ socket.close();
+ return port;
+ }
+
+ /**
+ * Finds a free port on the machine, while allowing the caller to
+ * specify a port number that must never be returned.
+ */
+ public static int findFreePortExcepting(int portToExclude) throws IOException {
+ ServerSocket socket1 = null;
+ ServerSocket socket2 = null;
try {
- fs = path.getFileSystem(hiveConf);
- if (!fs.exists(path)) {
- fs.mkdirs(path);
+ socket1 = new ServerSocket(0);
+ socket2 = new ServerSocket(0);
+ if (socket1.getLocalPort() != portToExclude) {
+ return socket1.getLocalPort();
+ }
+ // If we're here, then socket1.getLocalPort was the port to exclude
+ // Since both sockets were open together at a point in time, we're
+ // guaranteed that socket2.getLocalPort() is not the same.
+ return socket2.getLocalPort();
+ } finally {
+ if (socket1 != null){
+ socket1.close();
+ }
+ if (socket2 != null){
+ socket2.close();
}
- } catch (IOException e) {
- throw new MetaException("Unable to : " + path);
}
}
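
[Editorial note: taken together, these additions are test scaffolding. findFreePort grabs an ephemeral port, startMetaStore launches HiveMetaStore.startMetaStore on a daemon thread, and loopUntilHMSReady polls the port roughly once a second for up to a minute before returning, dumping all thread stacks if the server never comes up. A rough usage sketch under those assumptions; the wrapper class here is hypothetical:

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.MetaStoreUtils;

  public class StartHmsSketch {
    public static void main(String[] args) throws Exception {
      HiveConf conf = new HiveConf();
      // picks a free port, starts the Thrift server, blocks until it accepts connections
      int port = MetaStoreUtils.startMetaStore(conf);
      conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
      IMetaStoreClient client = new HiveMetaStoreClient(conf);
      System.out.println(client.getAllDatabases());  // sanity check: should contain "default"
      client.close();
    }
  }
]
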
@@ -1225,52 +906,12 @@ public class MetaStoreUtils {
return "TRUE".equalsIgnoreCase(params.get("EXTERNAL"));
}
- /**
- * Determines whether a table is an immutable table.
- * Immutable tables are write-once/replace, and do not support append. Partitioned
- * immutable tables do support additions by way of creation of new partitions, but
- * do not allow the partitions themselves to be appended to. "INSERT INTO" will not
- * work for Immutable tables.
- *
- * @param table table of interest
- *
- * @return true if immutable
- */
- public static boolean isImmutableTable(Table table) {
- if (table == null){
- return false;
- }
- Map<String, String> params = table.getParameters();
- if (params == null) {
- return false;
- }
-
- return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_IMMUTABLE));
- }
-
public static boolean isArchived(
org.apache.hadoop.hive.metastore.api.Partition part) {
Map<String, String> params = part.getParameters();
return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED));
}
- public static Path getOriginalLocation(
- org.apache.hadoop.hive.metastore.api.Partition part) {
- Map<String, String> params = part.getParameters();
- assert(isArchived(part));
- String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION);
- assert( originalLocation != null);
-
- return new Path(originalLocation);
- }
-
- public static boolean isNonNativeTable(Table table) {
- if (table == null || table.getParameters() == null) {
- return false;
- }
- return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null);
- }
-
/**
* Filter that filters out hidden files
*/
@@ -1301,29 +942,6 @@ public class MetaStoreUtils {
return true;
}
- /**
- * Returns true if partial has the same values as full for all values that
- * aren't empty in partial.
- */
-
- public static boolean pvalMatches(List<String> partial, List<String> full) {
- if(partial.size() > full.size()) {
- return false;
- }
- Iterator<String> p = partial.iterator();
- Iterator<String> f = full.iterator();
-
- while(p.hasNext()) {
- String pval = p.next();
- String fval = f.next();
-
- if (pval.length() != 0 && !pval.equals(fval)) {
- return false;
- }
- }
- return true;
- }
-
public static String getIndexTableName(String dbName, String baseTblName, String indexName) {
return dbName + "__" + baseTblName + "_" + indexName + "__";
}
@@ -1342,26 +960,6 @@ public class MetaStoreUtils {
return TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
}
- /**
- * Given a map of partition column names to values, this creates a filter
- * string that can be used to call the *byFilter methods
- * @param m
- * @return the filter string
- */
- public static String makeFilterStringFromMap(Map<String, String> m) {
- StringBuilder filter = new StringBuilder();
- for (Entry<String, String> e : m.entrySet()) {
- String col = e.getKey();
- String val = e.getValue();
- if (filter.length() == 0) {
- filter.append(col + "=\"" + val + "\"");
- } else {
- filter.append(" and " + col + "=\"" + val + "\"");
- }
- }
- return filter.toString();
- }
-
public static boolean isView(Table table) {
if (table == null) {
return false;
@@ -1369,42 +967,6 @@ public class MetaStoreUtils {
return TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
}
- /**
- * create listener instances as per the configuration.
- *
- * @param clazz
- * @param conf
- * @param listenerImplList
- * @return
- * @throws MetaException
- */
- static <T> List<T> getMetaStoreListeners(Class<T> clazz,
- HiveConf conf, String listenerImplList) throws MetaException {
- List<T> listeners = new ArrayList<T>();
-
- if (StringUtils.isBlank(listenerImplList)) {
- return listeners;
- }
-
- String[] listenerImpls = listenerImplList.split(",");
- for (String listenerImpl : listenerImpls) {
- try {
- T listener = (T) Class.forName(
- listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor(
- Configuration.class).newInstance(conf);
- listeners.add(listener);
- } catch (InvocationTargetException ie) {
- throw new MetaException("Failed to instantiate listener named: "+
- listenerImpl + ", reason: " + ie.getCause());
- } catch (Exception e) {
- throw new MetaException("Failed to instantiate listener named: "+
- listenerImpl + ", reason: " + e);
- }
- }
-
- return listeners;
- }
-
@SuppressWarnings("unchecked")
public static Class<? extends RawStore> getClass(String rawStoreClassName)
throws MetaException {
@@ -1448,24 +1010,6 @@ public class MetaStoreUtils {
}
}
- public static void validatePartitionNameCharacters(List<String> partVals,
- Pattern partitionValidationPattern) throws MetaException {
-
- String invalidPartitionVal =
- HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern);
- if (invalidPartitionVal != null) {
- throw new MetaException("Partition value '" + invalidPartitionVal +
- "' contains a character " + "not matched by whitelist pattern '" +
- partitionValidationPattern.toString() + "'. " + "(configure with " +
- HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname + ")");
- }
- }
-
- public static boolean partitionNameHasValidCharacters(List<String> partVals,
- Pattern partitionValidationPattern) {
- return HiveStringUtils.getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null;
- }
-
/**
* @param schema1: The first schema to be compared
* @param schema2: The second schema to be compared
@@ -1538,97 +1082,6 @@ public class MetaStoreUtils {
return names;
}
- /**
- * Helper function to transform Nulls to empty strings.
- */
- private static final com.google.common.base.Function<String,String> transFormNullsToEmptyString
- = new com.google.common.base.Function<String, String>() {
- @Override
- public java.lang.String apply(@Nullable java.lang.String string) {
- return StringUtils.defaultString(string);
- }
- };
-
- /**
- * Create a URL from a string representing a path to a local file.
- * The path string can be just a path, or can start with file:/, file:///
- * @param onestr path string
- * @return
- */
- private static URL urlFromPathString(String onestr) {
- URL oneurl = null;
- try {
- if (onestr.startsWith("file:/")) {
- oneurl = new URL(onestr);
- } else {
- oneurl = new File(onestr).toURL();
- }
- } catch (Exception err) {
- LOG.error("Bad URL " + onestr + ", ignoring path");
- }
- return oneurl;
- }
-
- /**
- * Add new elements to the classpath.
- *
- * @param newPaths
- * Array of classpath elements
- */
- public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception {
- URLClassLoader loader = (URLClassLoader) cloader;
- List<URL> curPath = Arrays.asList(loader.getURLs());
- ArrayList<URL> newPath = new ArrayList<URL>(curPath.size());
-
- // get a list with the current classpath components
- for (URL onePath : curPath) {
- newPath.add(onePath);
- }
- curPath = newPath;
-
- for (String onestr : newPaths) {
- URL oneurl = urlFromPathString(onestr);
- if (oneurl != null && !curPath.contains(oneurl)) {
- curPath.add(oneurl);
- }
- }
-
- return new URLClassLoader(curPath.toArray(new URL[0]), loader);
- }
-
- // this function will merge csOld into csNew.
- public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
- throws InvalidObjectException {
- List<ColumnStatisticsObj> list = new ArrayList<>();
- if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) {
- // Some of the columns' stats are missing
- // This implies partition schema has changed. We will merge columns
- // present in both, overwrite stats for columns absent in metastore and
- // leave alone columns stats missing from stats task. This last case may
- // leave stats in stale state. This will be addressed later.
- LOG.debug("New ColumnStats size is {}, but old ColumnStats size is {}",
- csNew.getStatsObj().size(), csOld.getStatsObjSize());
- }
- // In this case, we have to find out which columns can be merged.
- Map<String, ColumnStatisticsObj> map = new HashMap<>();
- // We build a hash map from colName to object for old ColumnStats.
- for (ColumnStatisticsObj obj : csOld.getStatsObj()) {
- map.put(obj.getColName(), obj);
- }
- for (int index = 0; index < csNew.getStatsObj().size(); index++) {
- ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
- ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName());
- if (statsObjOld != null) {
- // If statsObjOld is found, we can merge.
- ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew,
- statsObjOld);
- merger.merge(statsObjNew, statsObjOld);
- }
- list.add(statsObjNew);
- }
- csNew.setStatsObj(list);
- }
-
public static List<String> getColumnNames(List<FieldSchema> schema) {
List<String> cols = new ArrayList<>(schema.size());
for (FieldSchema fs : schema) {
@@ -1636,32 +1089,4 @@ public class MetaStoreUtils {
}
return cols;
}
-
- /**
- * Verify if the user is allowed to make DB notification related calls.
- * Only the superusers defined in the Hadoop proxy user settings have the permission.
- *
- * @param user the short user name
- * @param conf that contains the proxy user settings
- * @return if the user has the permission
- */
- public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) {
- DefaultImpersonationProvider sip = ProxyUsers.getDefaultImpersonationProvider();
- // Just need to initialize the ProxyUsers for the first time, given that the conf will not change on the fly
- if (sip == null) {
- ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
- sip = ProxyUsers.getDefaultImpersonationProvider();
- }
- Map<String, Collection<String>> proxyHosts = sip.getProxyHosts();
- Collection<String> hostEntries = proxyHosts.get(sip.getProxySuperuserIpConfKey(user));
- MachineList machineList = new MachineList(hostEntries);
- ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress;
- return machineList.includes(ipAddress);
- }
-
- /** Duplicates AcidUtils; used in a couple places in metastore. */
- public static boolean isInsertOnlyTableParam(Map<String, String> params) {
- String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
- return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
- }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
new file mode 100644
index 0000000..80fae28
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/SerDeStorageSchemaReader.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.serde2.Deserializer;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.StringUtils;
+
+import java.util.List;
+
+public class SerDeStorageSchemaReader implements StorageSchemaReader {
+ @Override
+ public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext, Configuration conf)
+ throws MetaException {
+ ClassLoader orgHiveLoader = null;
+ try {
+ if (envContext != null) {
+ String addedJars = envContext.getProperties().get("hive.added.jars.path");
+ if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+ // remember the original class loader so it can be restored in the finally block (thread safety)
+ orgHiveLoader = conf.getClassLoader();
+ ClassLoader loader = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.addToClassPath(
+ orgHiveLoader, org.apache.commons.lang.StringUtils.split(addedJars, ","));
+ conf.setClassLoader(loader);
+ }
+ }
+
+ Deserializer s = MetaStoreUtils.getDeserializer(conf, tbl, false);
+ return MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), s);
+ } catch (Exception e) {
+ StringUtils.stringifyException(e);
+ throw new MetaException(e.getMessage());
+ } finally {
+ if (orgHiveLoader != null) {
+ conf.setClassLoader(orgHiveLoader);
+ }
+ }
+ }
+}
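
[Editorial note: the new class plugs into the StorageSchemaReader interface. It temporarily widens the configuration's class loader with any session-added jars (the hive.added.jars.path property), derives the columns from the table's deserializer, and restores the original loader in the finally block. A hedged sketch of direct use; in practice the handler presumably instantiates the reader from configuration rather than with new, and tbl, envContext, and conf are assumed to be in scope:

  StorageSchemaReader reader = new SerDeStorageSchemaReader();
  List<FieldSchema> cols = reader.readSchema(tbl, envContext, conf);
  for (FieldSchema fs : cols) {
    System.out.println(fs.getName() + ": " + fs.getType());  // one name/type pair per column
  }
]
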
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
deleted file mode 100644
index 38b0875..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.lang.reflect.InvocationTargetException;
-import java.net.Socket;
-
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
-
-/**
- * TSetIpAddressProcessor passes the IP address of the Thrift client to the HMSHandler.
- */
-public class TSetIpAddressProcessor<I extends Iface> extends ThriftHiveMetastore.Processor<Iface> {
-
- @SuppressWarnings("unchecked")
- public TSetIpAddressProcessor(I iface) throws SecurityException, NoSuchFieldException,
- IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
- InvocationTargetException {
- super(iface);
- }
-
- @Override
- public boolean process(final TProtocol in, final TProtocol out) throws TException {
- setIpAddress(in);
-
- return super.process(in, out);
- }
-
- protected void setIpAddress(final TProtocol in) {
- TTransport transport = in.getTransport();
- if (!(transport instanceof TSocket)) {
- return;
- }
- setIpAddress(((TSocket)transport).getSocket());
- }
-
- protected void setIpAddress(final Socket inSocket) {
- HMSHandler.setThreadLocalIpAddress(inSocket.getInetAddress().getHostAddress());
- }
-}
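
[Editorial note: for reference, the essence of the deleted processor in isolation: before dispatching a call, inspect the input protocol's transport and, if it is socket-backed, record the peer address (the real class handed it to an HMSHandler thread-local via setThreadLocalIpAddress). A generic Thrift sketch of that step, assuming in is the request TProtocol:

  TTransport transport = in.getTransport();
  if (transport instanceof TSocket) {
    String clientIp = ((TSocket) transport).getSocket()
        .getInetAddress().getHostAddress();
    // stash clientIp somewhere the handler can read it,
    // e.g. a ThreadLocal<String> (as HMSHandler.setThreadLocalIpAddress did)
  }
]
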
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
deleted file mode 100644
index 64f0b96..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.net.Socket;
-import java.security.PrivilegedExceptionAction;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_args;
-import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_result;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.ProcessFunction;
-import org.apache.thrift.TApplicationException;
-import org.apache.thrift.TBase;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TMessage;
-import org.apache.thrift.protocol.TMessageType;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolException;
-import org.apache.thrift.protocol.TProtocolUtil;
-import org.apache.thrift.protocol.TType;
-
-/** TUGIBasedProcessor is used in unsecured mode for Thrift metastore client-server communication.
- * This processor checks whether the first RPC call after the connection is set up is set_ugi(),
- * through which the client sends its UGI to the server. The processor then performs all
- * subsequent RPCs on the connection using ugi.doAs(), so all actions run in the client user's
- * context. Note that old clients never call set_ugi(), so no UGI is ever received on the
- * server side; in that case the server exhibits the previous behavior and continues as usual.
- */
-@SuppressWarnings("rawtypes")
-public class TUGIBasedProcessor<I extends Iface> extends TSetIpAddressProcessor<Iface> {
-
- private final I iface;
- private final Map<String, org.apache.thrift.ProcessFunction<Iface, ? extends TBase>>
- functions;
- static final Logger LOG = LoggerFactory.getLogger(TUGIBasedProcessor.class);
-
- public TUGIBasedProcessor(I iface) throws SecurityException, NoSuchFieldException,
- IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
- InvocationTargetException {
- super(iface);
- this.iface = iface;
- this.functions = getProcessMapView();
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public boolean process(final TProtocol in, final TProtocol out) throws TException {
- setIpAddress(in);
-
- final TMessage msg = in.readMessageBegin();
- final ProcessFunction<Iface, ? extends TBase> fn = functions.get(msg.name);
- if (fn == null) {
- TProtocolUtil.skip(in, TType.STRUCT);
- in.readMessageEnd();
- TApplicationException x = new TApplicationException(TApplicationException.UNKNOWN_METHOD,
- "Invalid method name: '"+msg.name+"'");
- out.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
- x.write(out);
- out.writeMessageEnd();
- out.getTransport().flush();
- return true;
- }
- TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
- // Store ugi in transport if the rpc is set_ugi
- if (msg.name.equalsIgnoreCase("set_ugi")){
- try {
- handleSetUGI(ugiTrans, (set_ugi<Iface>)fn, msg, in, out);
- } catch (TException e) {
- throw e;
- } catch (Exception e) {
- throw new TException(e.getCause());
- }
- return true;
- }
- UserGroupInformation clientUgi = ugiTrans.getClientUGI();
- if (null == clientUgi){
- // At this point, the transport must contain the client UGI; if it doesn't, it's an old client.
- fn.process(msg.seqid, in, out, iface);
- return true;
- } else { // Found ugi, perform doAs().
- PrivilegedExceptionAction<Void> pvea = new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() {
- try {
- fn.process(msg.seqid,in, out, iface);
- return null;
- } catch (TException te) {
- throw new RuntimeException(te);
- }
- }
- };
- try {
- clientUgi.doAs(pvea);
- return true;
- } catch (RuntimeException rte) {
- if (rte.getCause() instanceof TException) {
- throw (TException)rte.getCause();
- }
- throw rte;
- } catch (InterruptedException ie) {
- throw new RuntimeException(ie); // unexpected!
- } catch (IOException ioe) {
- throw new RuntimeException(ioe); // unexpected!
- } finally {
- try {
- FileSystem.closeAllForUGI(clientUgi);
- } catch (IOException e) {
- LOG.error("Could not clean up file-system handles for UGI: " + clientUgi, e);
- }
- }
- }
- }
-
- private void handleSetUGI(TUGIContainingTransport ugiTrans,
- set_ugi<Iface> fn, TMessage msg, TProtocol iprot, TProtocol oprot)
- throws TException, SecurityException, NoSuchMethodException, IllegalArgumentException,
- IllegalAccessException, InvocationTargetException{
-
- UserGroupInformation clientUgi = ugiTrans.getClientUGI();
- if( null != clientUgi){
- throw new TException(new IllegalStateException("UGI is already set. Resetting is not " +
- "allowed. Current ugi is: " + clientUgi.getUserName()));
- }
-
- set_ugi_args args = fn.getEmptyArgsInstance();
- try {
- args.read(iprot);
- } catch (TProtocolException e) {
- iprot.readMessageEnd();
- TApplicationException x = new TApplicationException(TApplicationException.PROTOCOL_ERROR,
- e.getMessage());
- oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
- x.write(oprot);
- oprot.writeMessageEnd();
- oprot.getTransport().flush();
- return;
- }
- iprot.readMessageEnd();
- set_ugi_result result = fn.getResult(iface, args);
- List<String> principals = result.getSuccess();
- // Store the ugi in transport and then continue as usual.
- ugiTrans.setClientUGI(UserGroupInformation.createRemoteUser(principals.remove(principals.size()-1)));
- oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.REPLY, msg.seqid));
- result.write(oprot);
- oprot.writeMessageEnd();
- oprot.getTransport().flush();
- }
-
- @Override
- protected void setIpAddress(final TProtocol in) {
- TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
- Socket socket = ugiTrans.getSocket();
- if (socket != null) {
- setIpAddress(socket);
- }
- }
-}
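
[Editorial note: the heart of the deleted class is the doAs pattern. Once set_ugi() has parked a UGI on the transport, every later RPC runs inside that identity, so HDFS access happens as the remote user, and the cached file-system handles for that identity are closed afterwards. A condensed sketch of the pattern; the wrapper method is hypothetical:

  static void processAsClient(UserGroupInformation clientUgi) throws Exception {
    try {
      clientUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
        // dispatch the RPC as the client user, i.e. fn.process(...) in the real class
        return null;
      });
    } finally {
      // drop cached filesystem handles created under this identity
      FileSystem.closeAllForUGI(clientUgi);
    }
  }
]
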
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java b/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
deleted file mode 100644
index 3c72c9c..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.repl;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.TimerTask;
-import java.util.concurrent.TimeUnit;
-
-public class DumpDirCleanerTask extends TimerTask {
- public static final Logger LOG = LoggerFactory.getLogger(DumpDirCleanerTask.class);
- private final HiveConf conf;
- private final Path dumpRoot;
- private final long ttl;
-
- public DumpDirCleanerTask(HiveConf conf) {
- this.conf = conf;
- dumpRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLDIR));
- ttl = conf.getTimeVar(ConfVars.REPL_DUMPDIR_TTL, TimeUnit.MILLISECONDS);
- }
-
- @Override
- public void run() {
- LOG.debug("Trying to delete old dump dirs");
- try {
- FileSystem fs = FileSystem.get(dumpRoot.toUri(), conf);
- FileStatus[] statuses = fs.listStatus(dumpRoot);
- for (FileStatus status : statuses)
- {
- if (status.getModificationTime() < System.currentTimeMillis() - ttl)
- {
- fs.delete(status.getPath(), true);
- LOG.info("Deleted old dump dir: " + status.getPath());
- }
- }
- } catch (IOException e) {
- LOG.error("Error while trying to delete dump dir", e);
- }
- }
-}
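
[Editorial note: DumpDirCleanerTask is a plain java.util.TimerTask that deletes entries under the repl dump root once they are older than REPL_DUMPDIR_TTL, so something had to schedule it; that wiring is not part of this diff. A sketch of the kind of scheduling involved (the timer name and one-hour period are illustrative, not taken from this patch):

  Timer timer = new Timer("DumpDirCleaner", true /* daemon */);
  long period = TimeUnit.HOURS.toMillis(1);  // illustrative sweep interval
  timer.scheduleAtFixedRate(new DumpDirCleanerTask(conf), 0 /* initial delay */, period);
]
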
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
deleted file mode 100644
index 64cdfe0..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
-
-/**
- *
- * DummyJdoConnectionUrlHook.
- *
- * An implementation of JDOConnectionURLHook which simply returns CORRECT_URL when
- * getJdoConnectionUrl is called.
- */
-public class DummyJdoConnectionUrlHook implements JDOConnectionURLHook {
-
- public static final String initialUrl = "BAD_URL";
- public static final String newUrl = "CORRECT_URL";
-
- @Override
- public String getJdoConnectionUrl(Configuration conf) throws Exception {
- return newUrl;
- }
-
- @Override
- public void notifyBadConnectionUrl(String url) {
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
deleted file mode 100644
index 84b70d8..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ /dev/null
@@ -1,1001 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.junit.Assert;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
-import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.TableMeta;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.thrift.TException;
-
-/**
- *
- * DummyRawStoreForJdoConnection.
- *
- * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been
- * applied when this class's setConf method is called, by checking that the value of the
- * METASTORECONNECTURLKEY ConfVar has been updated.
- *
- * All non-void methods return default values.
- */
-public class DummyRawStoreForJdoConnection implements RawStore {
-
- @Override
- public Configuration getConf() {
-
- return null;
- }
-
- @Override
- public void setConf(Configuration arg0) {
- String expected = DummyJdoConnectionUrlHook.newUrl;
- String actual = arg0.get(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname);
-
- Assert.assertEquals("The expected URL used by JDO to connect to the metastore: " + expected +
- " did not match the actual value when the Raw Store was initialized: " + actual,
- expected, actual);
- }
-
- @Override
- public void shutdown() {
-
-
- }
-
- @Override
- public boolean openTransaction() {
-
- return false;
- }
-
- @Override
- public boolean commitTransaction() {
- return false;
- }
-
- @Override
- public boolean isActiveTransaction() {
- return false;
- }
-
- @Override
- public void rollbackTransaction() {
- }
-
- @Override
- public void createDatabase(Database db) throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public Database getDatabase(String name) throws NoSuchObjectException {
-
- return null;
- }
-
- @Override
- public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
-
- return false;
- }
-
- @Override
- public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException,
- MetaException {
-
- return false;
- }
-
- @Override
- public List<String> getDatabases(String pattern) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getAllDatabases() throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public boolean createType(Type type) {
-
- return false;
- }
-
- @Override
- public Type getType(String typeName) {
-
- return null;
- }
-
- @Override
- public boolean dropType(String typeName) {
-
- return false;
- }
-
- @Override
- public void createTable(Table tbl) throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public boolean dropTable(String dbName, String tableName) throws MetaException {
-
- return false;
- }
-
- @Override
- public Table getTable(String dbName, String tableName) throws MetaException {
-
- return null;
- }
-
- @Override
- public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
-
- return false;
- }
-
- @Override
- public Partition getPartition(String dbName, String tableName, List<String> part_vals)
- throws MetaException, NoSuchObjectException {
-
- return null;
- }
-
- @Override
- public boolean dropPartition(String dbName, String tableName, List<String> part_vals)
- throws MetaException {
-
- return false;
- }
-
- @Override
- public List<Partition> getPartitions(String dbName, String tableName, int max)
- throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException,
- MetaException {
-
-
- }
-
- @Override
- public List<String> getTables(String dbName, String pattern) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getTables(String dbName, String pattern, TableType tableType) throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
- throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public List<Table> getTableObjectsByName(String dbname, List<String> tableNames)
- throws MetaException, UnknownDBException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getAllTables(String dbName) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> listTableNamesByFilter(String dbName, String filter, short max_tables)
- throws MetaException, UnknownDBException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts)
- throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
- return null;
- }
-
- @Override
- public List<String> listPartitionNamesByFilter(String db_name, String tbl_name, String filter,
- short max_parts) throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public void alterPartition(String db_name, String tbl_name, List<String> part_vals,
- Partition new_part) throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public void alterPartitions(String db_name, String tbl_name, List<List<String>> part_vals_list,
- List<Partition> new_parts) throws InvalidObjectException, MetaException {
-
-
- }
-
-
- @Override
- public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
-
- return false;
- }
-
- @Override
- public Index getIndex(String dbName, String origTableName, String indexName)
- throws MetaException {
-
- return null;
- }
-
- @Override
- public boolean dropIndex(String dbName, String origTableName, String indexName)
- throws MetaException {
-
- return false;
- }
-
- @Override
- public List<Index> getIndexes(String dbName, String origTableName, int max)
- throws MetaException {
-
- return null;
- }
-
- @Override
- public List<String> listIndexNames(String dbName, String origTableName, short max)
- throws MetaException {
-
- return Collections.emptyList();
- }
-
- @Override
- public void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
- throws InvalidObjectException, MetaException {
-
-
- }
-
- @Override
- public List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter,
- short maxParts) throws MetaException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<Partition> getPartitionsByNames(String dbName, String tblName,
- List<String> partNames) throws MetaException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
- String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
- return false;
- }
-
- @Override
- public int getNumPartitionsByFilter(String dbName, String tblName, String filter)
- throws MetaException, NoSuchObjectException {
- return -1;
- }
-
- @Override
- public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr)
- throws MetaException, NoSuchObjectException {
- return -1;
- }
-
- @Override
- public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals,
- PartitionEventType evtType) throws MetaException, UnknownTableException,
- InvalidPartitionException, UnknownPartitionException {
-
- return null;
- }
-
- @Override
- public boolean isPartitionMarkedForEvent(String dbName, String tblName,
- Map<String, String> partName, PartitionEventType evtType) throws MetaException,
- UnknownTableException, InvalidPartitionException, UnknownPartitionException {
-
- return false;
- }
-
- @Override
- public boolean addRole(String roleName, String ownerName) throws InvalidObjectException,
- MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
- PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException,
- InvalidObjectException {
-
- return false;
- }
-
- @Override
- public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
- throws MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
- throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
- List<String> groupNames) throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
- String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
- String partition, String userName, List<String> groupNames) throws InvalidObjectException,
- MetaException {
-
- return null;
- }
-
- @Override
- public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
- String partitionName, String columnName, String userName, List<String> groupNames)
- throws InvalidObjectException, MetaException {
-
- return null;
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
- PrincipalType principalType) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
- PrincipalType principalType, String dbName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName, List<String> partValues,
- String partName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName, String columnName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(String principalName,
- PrincipalType principalType, String dbName, String tableName, List<String> partVals,
- String partName, String columnName) {
-
- return Collections.emptyList();
- }
-
- @Override
- public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
- MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
- throws InvalidObjectException, MetaException, NoSuchObjectException {
-
- return false;
- }
-
- @Override
- public Role getRole(String roleName) throws NoSuchObjectException {
-
- return null;
- }
-
- @Override
- public List<String> listRoleNames() {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<Role> listRoles(String principalName, PrincipalType principalType) {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
- PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<RolePrincipalGrant> listRoleMembers(String roleName) {
- return null;
- }
-
- @Override
- public Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals,
- String user_name, List<String> group_names) throws MetaException, NoSuchObjectException,
- InvalidObjectException {
-
- return null;
- }
-
- @Override
- public List<Partition> getPartitionsWithAuth(String dbName, String tblName, short maxParts,
- String userName, List<String> groupNames) throws MetaException, NoSuchObjectException,
- InvalidObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<String> listPartitionNamesPs(String db_name, String tbl_name, List<String> part_vals,
- short max_parts) throws MetaException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
- List<String> part_vals, short max_parts, String userName, List<String> groupNames)
- throws MetaException, InvalidObjectException, NoSuchObjectException {
-
- return Collections.emptyList();
- }
-
- @Override
- public long cleanupEvents() {
-
- return 0;
- }
-
- @Override
- public boolean addToken(String tokenIdentifier, String delegationToken) {
- return false;
- }
-
- @Override
- public boolean removeToken(String tokenIdentifier) {
- return false;
- }
-
- @Override
- public String getToken(String tokenIdentifier) {
- return null;
- }
-
- @Override
- public List<String> getAllTokenIdentifiers() {
- return Collections.emptyList();
- }
-
- @Override
- public int addMasterKey(String key) {
- return 0;
- }
-
- @Override
- public void updateMasterKey(Integer seqNo, String key) {
- }
-
- @Override
- public boolean removeMasterKey(Integer keySeq) {
- return false;
- }
-
- @Override
- public String[] getMasterKeys() {
- return new String[0];
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
- String principalName, PrincipalType principalType) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listGlobalGrantsAll() {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName, String partitionName, String columnName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName, String partitionName) {
- return Collections.emptyList();
- }
-
- @Override
- public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName, String columnName) {
- return Collections.emptyList();
- }
-
- @Override
- public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
- List<String> colName) throws MetaException, NoSuchObjectException {
- return null;
- }
-
- @Override
- public boolean deleteTableColumnStatistics(String dbName, String tableName,
- String colName)
- throws NoSuchObjectException, MetaException, InvalidObjectException {
- return false;
- }
-
-
- @Override
- public boolean deletePartitionColumnStatistics(String dbName, String tableName,
- String partName, List<String> partVals, String colName)
- throws NoSuchObjectException, MetaException, InvalidObjectException,
- InvalidInputException {
- return false;
-
- }
-
- @Override
- public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
- throws NoSuchObjectException, MetaException, InvalidObjectException {
- return false;
- }
-
- @Override
- public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals)
- throws NoSuchObjectException, MetaException, InvalidObjectException {
- return false;
- }
-
- @Override
- public void verifySchema() throws MetaException {
- }
-
- @Override
- public String getMetaStoreSchemaVersion() throws MetaException {
- return null;
- }
-
- @Override
- public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
- }
-
- @Override
- public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
- String tblName, List<String> colNames, List<String> partNames)
- throws MetaException, NoSuchObjectException {
- return Collections.emptyList();
- }
-
- @Override
- public boolean doesPartitionExist(String dbName, String tableName,
- List<String> partVals) throws MetaException, NoSuchObjectException {
- return false;
- }
-
- @Override
- public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
- throws InvalidObjectException, MetaException {
- return false;
- }
-
- @Override
- public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
- return false;
- }
-
- @Override
- public void dropPartitions(String dbName, String tblName, List<String> partNames) {
- }
-
- @Override
- public void createFunction(Function func) throws InvalidObjectException,
- MetaException {
- }
-
- @Override
- public void alterFunction(String dbName, String funcName, Function newFunction)
- throws InvalidObjectException, MetaException {
- }
-
- @Override
- public void dropFunction(String dbName, String funcName)
- throws MetaException, NoSuchObjectException, InvalidObjectException,
- InvalidInputException {
- }
-
- @Override
- public Function getFunction(String dbName, String funcName)
- throws MetaException {
- return null;
- }
-
- @Override
- public List<Function> getAllFunctions()
- throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public List<String> getFunctions(String dbName, String pattern)
- throws MetaException {
- return Collections.emptyList();
- }
-
- @Override
- public AggrStats get_aggr_stats_for(String dbName,
- String tblName, List<String> partNames, List<String> colNames)
- throws MetaException {
- return null;
- }
-
- @Override
- public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
- return null;
- }
-
- @Override
- public void addNotificationEvent(NotificationEvent event) {
-
- }
-
- @Override
- public void cleanNotificationEvents(int olderThan) {
-
- }
-
- @Override
- public CurrentNotificationEventId getCurrentNotificationEventId() {
- return null;
- }
-
- @Override
- public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
- return null;
- }
-
- @Override
- public void flushCache() {
-
- }
-
- @Override
- public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
- return null;
- }
-
- @Override
- public void putFileMetadata(
- List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
- }
-
- @Override
- public boolean isFileMetadataSupported() {
- return false;
- }
-
- @Override
- public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
- ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
- }
-
- @Override
- public int getTableCount() throws MetaException {
- return 0;
- }
-
- @Override
- public int getPartitionCount() throws MetaException {
- return 0;
- }
-
- @Override
- public int getDatabaseCount() throws MetaException {
- return 0;
- }
-
- @Override
- public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
- return null;
- }
-
- @Override
- public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<SQLForeignKey> getForeignKeys(String parent_db_name,
- String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<SQLUniqueConstraint> getUniqueConstraints(String db_name, String tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name)
- throws MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<String> createTableWithConstraints(Table tbl,
- List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
- List<SQLUniqueConstraint> uniqueConstraints,
- List<SQLNotNullConstraint> notNullConstraints)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public void dropConstraint(String dbName, String tableName,
- String constraintName) throws NoSuchObjectException {
- // TODO Auto-generated method stub
- }
-
- @Override
- public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<String> addForeignKeys(List<SQLForeignKey> fks)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
- throws InvalidObjectException, MetaException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public Map<String, List<ColumnStatisticsObj>> getColStatsForTablePartitions(String dbName,
- String tableName) throws MetaException, NoSuchObjectException {
- // TODO Auto-generated method stub
- return null;
- }
-
- @Override
- public String getMetastoreDbUuid() throws MetaException {
- throw new MetaException("Get metastore uuid is not implemented");
- }
-
- @Override
- public void createResourcePlan(WMResourcePlan resourcePlan) throws MetaException {
- }
-
- @Override
- public WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
- return null;
- }
-
- @Override
- public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
- return null;
- }
-
- @Override
- public void alterResourcePlan(String name, WMResourcePlan resourcePlan)
- throws NoSuchObjectException, InvalidOperationException, MetaException {
- }
-
- @Override
- public boolean validateResourcePlan(String name)
- throws NoSuchObjectException, InvalidObjectException, MetaException {
- return false;
- }
-
- @Override
- public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
- }
-
- @Override
- public void createWMTrigger(WMTrigger trigger) throws MetaException {
- }
-
- @Override
- public void alterWMTrigger(WMTrigger trigger)
- throws NoSuchObjectException, InvalidOperationException, MetaException {
- }
-
- @Override
- public void dropWMTrigger(String resourcePlanName, String triggerName)
- throws NoSuchObjectException, MetaException {
- }
-
- @Override
- public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
- throws NoSuchObjectException, MetaException {
- return null;
- }
-}
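
For context: the dummy store above only does its job once a test points the
metastore at it and at the URL hook whose effect its setConf() asserts. Below
is a minimal sketch of that wiring, assuming the standard HiveConf ConfVars
METASTORE_RAW_STORE_IMPL ("hive.metastore.rawstore.impl") and
METASTORECONNECTURLHOOK ("hive.metastore.ds.connection.url.hook"); the class
name JdoUrlHookWiringSketch is illustrative only, not part of this patch.

    package org.apache.hadoop.hive.metastore;

    import org.apache.hadoop.hive.conf.HiveConf;

    public class JdoUrlHookWiringSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Swap in the dummy RawStore; its setConf() asserts the hook ran first.
        conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
            DummyRawStoreForJdoConnection.class.getName());
        // Register the test hook that rewrites the JDO connection URL
        // (HiveConf.ConfVars.METASTORECONNECTURLKEY) before the store comes up.
        conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLHOOK,
            DummyJdoConnectionUrlHook.class.getName());
        // Any code path that now instantiates the RawStore from this conf will
        // fail the assertion in setConf() if the hook was skipped.
      }
    }
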
http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
deleted file mode 100644
index f581c7d..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestHiveMetastoreCli {
- private static final String[] CLI_ARGUMENTS = { "9999" };
-
- @Test
- public void testDefaultCliPortValue() {
- HiveConf configuration = new HiveConf();
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
- Assert.assertEquals(HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT), cli.getPort());
- }
-
- @Test
- public void testOverriddenCliPortValue() {
- HiveConf configuration = new HiveConf();
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
- cli.parse(CLI_ARGUMENTS);
-
- Assert.assertEquals(9999, cli.getPort());
- }
-
- @Test
- public void testOverriddenMetastoreServerPortValue() {
- HiveConf configuration = new HiveConf();
- HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
-
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
-
- Assert.assertEquals(12345, cli.getPort());
- }
-
- @Test
- public void testCliOverridesConfiguration() {
- HiveConf configuration = new HiveConf();
- HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
-
- HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
- cli.parse(CLI_ARGUMENTS);
-
- Assert.assertEquals(9999, cli.getPort());
- }
-}
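
The four tests above pin down a simple precedence rule: an explicit port given
on the command line beats hive.metastore.server.port from the configuration,
which beats the compiled-in default. Below is a minimal sketch of that rule
using only the HiveConf calls the tests themselves rely on; the standalone
class and the resolvePort helper are illustrative, not the actual
HiveMetastoreCli internals.

    import org.apache.hadoop.hive.conf.HiveConf;

    public class PortPrecedenceSketch {
      // CLI argument > hive.metastore.server.port > compiled-in default.
      static int resolvePort(HiveConf conf, String cliPortArg) {
        if (cliPortArg != null) {
          return Integer.parseInt(cliPortArg);   // explicit CLI override wins
        }
        // Fall back to the configured (or default) METASTORE_SERVER_PORT.
        return HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_SERVER_PORT);
      }

      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        HiveConf.setIntVar(conf, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
        System.out.println(resolvePort(conf, null));    // 12345: config wins
        System.out.println(resolvePort(conf, "9999"));  // 9999: CLI wins
      }
    }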