Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/02 21:57:07 UTC
svn commit: r1622108 [2/27] - in /hive/branches/tez: ./
accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/
beeline/src/java/org/apache/hive/beeline/
beeline/src/test/org/apache/hive/beeline/ bin/ bin/ext/ checkstyle/
common/src/java/o...
Modified: hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Tue Sep 2 19:56:56 2014
@@ -31,6 +31,7 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
+import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -43,7 +44,9 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.conf.Validator.PatternSet;
import org.apache.hadoop.hive.conf.Validator.RangeValidator;
+import org.apache.hadoop.hive.conf.Validator.RatioValidator;
import org.apache.hadoop.hive.conf.Validator.StringSet;
+import org.apache.hadoop.hive.conf.Validator.TimeValidator;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;
@@ -54,7 +57,6 @@ import org.apache.hive.common.HiveCompat
* Hive Configuration.
*/
public class HiveConf extends Configuration {
-
protected String hiveJar;
protected Properties origProp;
protected String auxJars;
@@ -353,9 +355,11 @@ public class HiveConf extends Configurat
METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
"Number of retries upon failure of Thrift metastore calls"),
- METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", 1,
+ METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
+ new TimeValidator(TimeUnit.SECONDS),
"Number of seconds for the client to wait between consecutive connection attempts"),
- METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", 600,
+ METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
+ new TimeValidator(TimeUnit.SECONDS),
"MetaStore Client socket timeout in seconds"),
METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
"password to use against metastore database"),
@@ -368,9 +372,9 @@ public class HiveConf extends Configurat
"JDBC connect string for a JDBC metastore"),
HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 1,
- "The number of times to retry a HMSHandler call if there were a connection error"),
- HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000,
- "The number of milliseconds between HMSHandler retry attempts"),
+ "The number of times to retry a HMSHandler call if there were a connection error."),
+ HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "1000ms",
+ new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
"Whether to force reloading of the HMSHandler configuration (including\n" +
"the connection URL, before the next metastore query that accesses the\n" +
@@ -465,10 +469,12 @@ public class HiveConf extends Configurat
"for operations like drop-partition (disallow the drop-partition if the user in\n" +
"question doesn't have permissions to delete the corresponding directory\n" +
"on the storage)."),
- METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", 0L,
- "Frequency at which timer task runs to purge expired events in metastore(in seconds)."),
- METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", 0L,
- "Duration after which events expire from events table (in seconds)"),
+ METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Frequency at which timer task runs to purge expired events in metastore."),
+ METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Duration after which events expire from events table"),
METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
"In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
"the client's reported user and group permissions. Note that this property must be set on \n" +
@@ -580,8 +586,9 @@ public class HiveConf extends Configurat
HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
// for hive script operator
- HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", 0,
- "How long to run autoprogressor for the script/UDTF operators (in seconds).\n" +
+ HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "How long to run autoprogressor for the script/UDTF operators.\n" +
"Set to 0 for forever."),
HIVETABLENAME("hive.table.name", "", ""),
HIVEPARTITIONNAME("hive.partition.name", "", ""),
@@ -690,10 +697,9 @@ public class HiveConf extends Configurat
"because this may prevent TaskTracker from killing tasks with infinite loops."),
HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC"),
- "Default file format for CREATE TABLE statement. \n" +
- "Options are TextFile, SequenceFile, RCfile and ORC. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
+ "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"),
HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile"),
- "Default file format for storing result of the query. Allows TextFile, SequenceFile and RCfile"),
+ "Default file format for storing result of the query."),
HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
// default serde for rcfile
@@ -720,8 +726,9 @@ public class HiveConf extends Configurat
"Whether to log the plan's progress every time a job's progress is checked.\n" +
"These logs are written to the location specified by hive.querylog.location"),
- HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", 60000L,
- "The interval to wait between logging the plan's progress in milliseconds.\n" +
+ HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms",
+ new TimeValidator(TimeUnit.MILLISECONDS),
+ "The interval to wait between logging the plan's progress.\n" +
"If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
"the progress is logged regardless of this value.\n" +
"The actual interval will be the ceiling of (this value divided by the value of\n" +
@@ -800,7 +807,7 @@ public class HiveConf extends Configurat
" config (hive.exec.orc.block.padding.tolerance)."),
HIVEMERGEINPUTFORMATSTRIPELEVEL("hive.merge.input.format.stripe.level",
"org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat",
- "Input file format to use for ORC stripe level merging (for internal use only)"),
+ "Input file format to use for ORC stripe level merging (for internal use only)"),
HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS(
"hive.merge.current.job.has.dynamic.partitions", false, ""),
@@ -841,8 +848,7 @@ public class HiveConf extends Configurat
HIVE_ORC_ENCODING_STRATEGY("hive.exec.orc.encoding.strategy", "SPEED", new StringSet("SPEED", "COMPRESSION"),
"Define the encoding strategy to use while writing data. Changing this will\n" +
"only affect the light weight encoding for integers. This flag will not\n" +
- "change the compression level of higher level compression codec (like ZLIB).\n" +
- "Possible options are SPEED and COMPRESSION."),
+ "change the compression level of higher level compression codec (like ZLIB)."),
HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
"If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
@@ -1030,9 +1036,11 @@ public class HiveConf extends Configurat
"When enabled dynamic partitioning column will be globally sorted.\n" +
"This way we can keep only one record writer open for each partition value\n" +
"in the reducer thereby reducing the memory pressure on reducers."),
- HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, ""),
- HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, ""),
- HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, ""),
+
+ HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."),
+ HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."),
+ HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(),
+ "Probability with which a row will be chosen."),
// whether to optimize union followed by select followed by filesink
// It creates sub-directories in the final output, so should not be turned on in systems
@@ -1099,16 +1107,17 @@ public class HiveConf extends Configurat
"The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "",
"The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
- HIVE_STATS_JDBC_TIMEOUT("hive.stats.jdbc.timeout", 30,
- "Timeout value (number of seconds) used by JDBC connection and statements."),
+ HIVE_STATS_JDBC_TIMEOUT("hive.stats.jdbc.timeout", "30s", new TimeValidator(TimeUnit.SECONDS),
+ "Timeout value used by JDBC connection and statements."),
HIVE_STATS_ATOMIC("hive.stats.atomic", false,
"whether to update metastore stats only if all stats are available"),
HIVE_STATS_RETRIES_MAX("hive.stats.retries.max", 0,
"Maximum number of retries when stats publisher/aggregator got an exception updating intermediate database. \n" +
"Default is no tries on failures."),
- HIVE_STATS_RETRIES_WAIT("hive.stats.retries.wait", 3000,
- "The base waiting window (in milliseconds) before the next retry. The actual wait time is calculated by " +
- "baseWindow * failures baseWindow * (failure 1) * (random number between [0.0,1.0])."),
+ HIVE_STATS_RETRIES_WAIT("hive.stats.retries.wait", "3000ms",
+ new TimeValidator(TimeUnit.MILLISECONDS),
+ "The base waiting window before the next retry. The actual wait time is calculated by " +
+ "baseWindow * failures baseWindow * (failure + 1) * (random number between [0.0,1.0])."),
HIVE_STATS_COLLECT_RAWDATASIZE("hive.stats.collect.rawdatasize", true,
"should the raw data size be collected when analyzing tables"),
CLIENT_STATS_COUNTERS("hive.client.stats.counters", "",
@@ -1218,8 +1227,9 @@ public class HiveConf extends Configurat
"The number of times you want to try to get all the locks"),
HIVE_UNLOCK_NUMRETRIES("hive.unlock.numretries", 10,
"The number of times you want to retry to do one unlock"),
- HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", 60,
- "The sleep time (in seconds) between various retries"),
+ HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", "60s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "The sleep time between various retries"),
HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false,
"This param is to control whether or not only do lock on queries\n" +
"that need to execute at least one mapred job."),
@@ -1239,8 +1249,8 @@ public class HiveConf extends Configurat
// Transactions
HIVE_TXN_MANAGER("hive.txn.manager",
"org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", ""),
- HIVE_TXN_TIMEOUT("hive.txn.timeout", 300,
- "time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds."),
+ HIVE_TXN_TIMEOUT("hive.txn.timeout", "300s", new TimeValidator(TimeUnit.SECONDS),
+ "time after which transactions are declared aborted if the client has not sent a heartbeat."),
HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000,
"Maximum number of transactions that can be fetched in one call to open_txns().\n" +
@@ -1254,12 +1264,14 @@ public class HiveConf extends Configurat
HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0,
"Number of compactor worker threads to run on this metastore instance."),
- HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", 86400L,
- "Time in seconds, before a given compaction in working state is declared a failure\n" +
+ HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", "86400s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Time before a given compaction in working state is declared a failure\n" +
"and returned to the initiated state."),
- HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", 300L,
- "Time in seconds between checks to see if any partitions need compacted.\n" +
+ HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", "300s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Time between checks to see if any partitions need compacted.\n" +
"This should be kept high because each check for compaction requires many calls against the NameNode."),
HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10,
@@ -1296,7 +1308,7 @@ public class HiveConf extends Configurat
"Currently the query should be single sourced not having any subquery and should not have\n" +
"any aggregations or distincts (which incurs RS), lateral views and joins.\n" +
"1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" +
- "2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)\n"
+ "2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)"
),
HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 1073741824L,
"Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" +
@@ -1405,6 +1417,14 @@ public class HiveConf extends Configurat
"to construct a list exception handlers to handle exceptions thrown\n" +
"by record readers"),
+ // operation log configuration
+ HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true,
+ "When true, HS2 will save operation logs"),
+ HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location",
+ "${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator +
+ "operation_logs",
+ "Top level directory where operation logs are stored if logging functionality is enabled"),
+
// logging configuration
HIVE_LOG4J_FILE("hive.log4j.file", "",
"Hive log4j configuration file.\n" +
@@ -1460,12 +1480,12 @@ public class HiveConf extends Configurat
"table. From 0.12 onwards, they are displayed separately. This flag will let you\n" +
"get old behavior, if desired. See, test-case in patch for HIVE-6689."),
- HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, Long.MAX_VALUE),
+ HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, null),
"This number of times HiveServer2 will attempt to start before exiting, sleeping 60 seconds between retries. \n" +
"The default of 30 will keep trying for 30 minutes."),
HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary", new StringSet("binary", "http"),
- "Server transport mode. \"binary\" or \"http\""),
+ "Transport mode of HiveServer2."),
// http (over thrift) transport settings
HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001,
@@ -1476,8 +1496,13 @@ public class HiveConf extends Configurat
"Minimum number of worker threads when in HTTP mode."),
HIVE_SERVER2_THRIFT_HTTP_MAX_WORKER_THREADS("hive.server2.thrift.http.max.worker.threads", 500,
"Maximum number of worker threads when in HTTP mode."),
- HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", 1800000,
- "Maximum idle time in milliseconds for a connection on the server when in HTTP mode."),
+ HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", "1800s",
+ new TimeValidator(TimeUnit.MILLISECONDS),
+ "Maximum idle time for a connection on the server when in HTTP mode."),
+ HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME("hive.server2.thrift.http.worker.keepalive.time", "60s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, " +
+ "excessive threads are killed after this time interval."),
// binary transport settings
HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000,
@@ -1500,21 +1525,26 @@ public class HiveConf extends Configurat
"Minimum number of Thrift worker threads"),
HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500,
"Maximum number of Thrift worker threads"),
-
+ HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME("hive.server2.thrift.worker.keepalive.time", "60s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, " +
+ "excessive threads are killed after this time interval."),
// Configuration for async thread pool in SessionManager
HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100,
"Number of threads in the async thread pool for HiveServer2"),
- HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", 10,
- "Time (in seconds) for which HiveServer2 shutdown will wait for async"),
+ HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", "10s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Maximum time for which HiveServer2 shutdown will wait for async"),
HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE("hive.server2.async.exec.wait.queue.size", 100,
"Size of the wait queue for async thread pool in HiveServer2.\n" +
"After hitting this limit, the async thread pool will reject new requests."),
- HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", 10,
- "Time (in seconds) that an idle HiveServer2 async thread (from the thread pool) will wait\n" +
- "for a new task to arrive before terminating"),
- HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", 5000L,
- "Time in milliseconds that HiveServer2 will wait,\n" +
- "before responding to asynchronous calls that use long polling"),
+ HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", "10s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Time that an idle HiveServer2 async thread (from the thread pool) will wait for a new task\n" +
+ "to arrive before terminating"),
+ HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", "5000ms",
+ new TimeValidator(TimeUnit.MILLISECONDS),
+ "Time that HiveServer2 will wait before responding to asynchronous calls that use long polling"),
// HiveServer2 auth configuration
HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
@@ -1557,8 +1587,8 @@ public class HiveConf extends Configurat
"must be a proper implementation of the interface\n" +
"org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2\n" +
"will call its Authenticate(user, passed) method to authenticate requests.\n" +
- "The implementation may optionally extend Hadoop's\n" +
- "org.apache.hadoop.conf.Configured class to grab Hive's Configuration object."),
+ "The implementation may optionally implement Hadoop's\n" +
+ "org.apache.hadoop.conf.Configurable class to grab Hive's Configuration object."),
HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null,
"List of the underlying pam services that should be used when auth type is PAM\n" +
"A file with the same name must exist in /etc/pam.d"),
@@ -1579,6 +1609,18 @@ public class HiveConf extends Configurat
HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", "set,reset,dfs,add,list,delete,compile",
"Comma separated list of non-SQL Hive commands users are authorized to execute"),
+ HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "0ms",
+ new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false),
+ "The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."),
+ HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "0ms",
+ new TimeValidator(TimeUnit.MILLISECONDS),
+ "Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."),
+ HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "0ms",
+ new TimeValidator(TimeUnit.MILLISECONDS),
+ "Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value.\n" +
+ " With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n" +
+ " With negative value, it's checked for all of the operations regardless of state."),
+
HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
"hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
"Comma separated list of configuration options which are immutable at runtime"),
@@ -1636,8 +1678,9 @@ public class HiveConf extends Configurat
"Enable list bucketing optimizer. Default value is false so that we disable it by default."),
// Allow TCP Keep alive socket option for for HiveServer or a maximum timeout for the socket.
- SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", 10,
- "Timeout for the HiveServer to close the connection if no response from the client in N seconds, defaults to 10 seconds."),
+ SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", "10s",
+ new TimeValidator(TimeUnit.SECONDS),
+ "Timeout for the HiveServer to close the connection if no response from the client. By default, 10 seconds."),
SERVER_TCP_KEEP_ALIVE("hive.server.tcp.keepalive", true,
"Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections."),
@@ -1696,8 +1739,9 @@ public class HiveConf extends Configurat
"turning on Tez for HiveServer2. The user could potentially want to run queries\n" +
"over Tez without the pool of sessions."),
- HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column", new PatternSet("none", "column"),
- "Whether to use quoted identifier. 'none' ot 'column' can be used. \n" +
+ HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column",
+ new StringSet("none", "column"),
+ "Whether to use quoted identifier. 'none' or 'column' can be used. \n" +
" none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
" column: implies column names can contain any character."
),
@@ -1717,8 +1761,9 @@ public class HiveConf extends Configurat
HIVE_CHECK_CROSS_PRODUCT("hive.exec.check.crossproducts", true,
"Check if a plan contains a Cross Product. If there is one, output a warning to the Session's console."),
- HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", 5000L,
- "Time in milliseconds to wait for another thread to localize the same resource for hive-tez."),
+ HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", "5000ms",
+ new TimeValidator(TimeUnit.MILLISECONDS),
+ "Time to wait for another thread to localize the same resource for hive-tez."),
HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS("hive.localize.resource.num.wait.attempts", 5,
"The number of attempts waiting for localizing a resource in hive-tez."),
TEZ_AUTO_REDUCER_PARALLELISM("hive.tez.auto.reducer.parallelism", false,
@@ -1828,11 +1873,29 @@ public class HiveConf extends Configurat
return validator == null ? null : validator.validate(value);
}
+ public String validatorDescription() {
+ return validator == null ? null : validator.toDescription();
+ }
+
public String typeString() {
- return valType.typeString();
+ String type = valType.typeString();
+ if (valType == VarType.STRING && validator != null) {
+ if (validator instanceof TimeValidator) {
+ type += "(TIME)";
+ }
+ }
+ return type;
+ }
+
+ public String getRawDescription() {
+ return description;
}
public String getDescription() {
+ String validator = validatorDescription();
+ if (validator != null) {
+ return validator + ".\n" + description;
+ }
return description;
}
@@ -1968,6 +2031,82 @@ public class HiveConf extends Configurat
setIntVar(this, var, val);
}
+ public static long getTimeVar(Configuration conf, ConfVars var, TimeUnit outUnit) {
+ return toTime(getVar(conf, var), getDefaultTimeUnit(var), outUnit);
+ }
+
+ public static void setTimeVar(Configuration conf, ConfVars var, long time, TimeUnit timeunit) {
+ assert (var.valClass == String.class) : var.varname;
+ conf.set(var.varname, time + stringFor(timeunit));
+ }
+
+ public long getTimeVar(ConfVars var, TimeUnit outUnit) {
+ return getTimeVar(this, var, outUnit);
+ }
+
+ public void setTimeVar(ConfVars var, long time, TimeUnit outUnit) {
+ setTimeVar(this, var, time, outUnit);
+ }
+
+ private static TimeUnit getDefaultTimeUnit(ConfVars var) {
+ TimeUnit inputUnit = null;
+ if (var.validator instanceof TimeValidator) {
+ inputUnit = ((TimeValidator)var.validator).getTimeUnit();
+ }
+ return inputUnit;
+ }
+
+ public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) {
+ String[] parsed = parseTime(value.trim());
+ return outUnit.convert(Long.valueOf(parsed[0].trim()), unitFor(parsed[1].trim(), inputUnit));
+ }
+
+ private static String[] parseTime(String value) {
+ char[] chars = value.toCharArray();
+ int i = 0;
+ for (; i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i])); i++) {
+ }
+ return new String[] {value.substring(0, i), value.substring(i)};
+ }
+
+ public static TimeUnit unitFor(String unit, TimeUnit defaultUnit) {
+ unit = unit.trim().toLowerCase();
+ if (unit.isEmpty()) {
+ if (defaultUnit == null) {
+ throw new IllegalArgumentException("Time unit is not specified");
+ }
+ return defaultUnit;
+ } else if (unit.equals("d") || unit.startsWith("day")) {
+ return TimeUnit.DAYS;
+ } else if (unit.equals("h") || unit.startsWith("hour")) {
+ return TimeUnit.HOURS;
+ } else if (unit.equals("m") || unit.startsWith("min")) {
+ return TimeUnit.MINUTES;
+ } else if (unit.equals("s") || unit.startsWith("sec")) {
+ return TimeUnit.SECONDS;
+ } else if (unit.equals("ms") || unit.startsWith("msec")) {
+ return TimeUnit.MILLISECONDS;
+ } else if (unit.equals("us") || unit.startsWith("usec")) {
+ return TimeUnit.MICROSECONDS;
+ } else if (unit.equals("ns") || unit.startsWith("nsec")) {
+ return TimeUnit.NANOSECONDS;
+ }
+ throw new IllegalArgumentException("Invalid time unit " + unit);
+ }
+
+ public static String stringFor(TimeUnit timeunit) {
+ switch (timeunit) {
+ case DAYS: return "day";
+ case HOURS: return "hour";
+ case MINUTES: return "min";
+ case SECONDS: return "sec";
+ case MILLISECONDS: return "msec";
+ case MICROSECONDS: return "usec";
+ case NANOSECONDS: return "nsec";
+ }
+ throw new IllegalArgumentException("Invalid timeunit " + timeunit);
+ }
+
public static long getLongVar(Configuration conf, ConfVars var) {
assert (var.valClass == Long.class) : var.varname;
return conf.getLong(var.varname, var.defaultLongVal);
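[The hunk above replaces raw numeric timeouts with unit-suffixed strings: toTime() splits the digits from the suffix via parseTime(), unitFor() maps the suffix (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec) to a TimeUnit, and stringFor() serializes on the way back. A minimal caller sketch, assuming a stock HiveConf; the METASTORE_CLIENT_SOCKET_TIMEOUT values are illustrative only:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.HiveConf;

    public class TimeVarSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();

        // An explicit suffix overrides the validator's default unit:
        // unitFor("min", SECONDS) resolves to MINUTES.
        conf.set(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname, "5min");

        // toTime() converts the stored value into the requested output unit.
        long ms = conf.getTimeVar(
            HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
        System.out.println(ms); // 300000

        // setTimeVar() writes back via stringFor(), storing "120sec".
        conf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
            120, TimeUnit.SECONDS);
      }
    }
]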
Modified: hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/Validator.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/Validator.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/Validator.java (original)
+++ hive/branches/tez/common/src/java/org/apache/hadoop/hive/conf/Validator.java Tue Sep 2 19:56:56 2014
@@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
/**
@@ -31,57 +32,85 @@ public interface Validator {
String validate(String value);
- static class StringSet implements Validator {
+ String toDescription();
+ class StringSet implements Validator {
+
+ private final boolean caseSensitive;
private final Set<String> expected = new LinkedHashSet<String>();
public StringSet(String... values) {
+ this(false, values);
+ }
+
+ public StringSet(boolean caseSensitive, String... values) {
+ this.caseSensitive = caseSensitive;
for (String value : values) {
- expected.add(value.toLowerCase());
+ expected.add(caseSensitive ? value : value.toLowerCase());
}
}
@Override
public String validate(String value) {
- if (value == null || !expected.contains(value.toLowerCase())) {
+ if (value == null || !expected.contains(caseSensitive ? value : value.toLowerCase())) {
return "Invalid value.. expects one of " + expected;
}
return null;
}
+
+ @Override
+ public String toDescription() {
+ return "Expects one of " + expected;
+ }
}
- static enum RANGE_TYPE {
+ enum TYPE {
INT {
@Override
protected boolean inRange(String value, Object lower, Object upper) {
int ivalue = Integer.parseInt(value);
- return (Integer)lower <= ivalue && ivalue <= (Integer)upper;
+ if (lower != null && ivalue < (Integer)lower) {
+ return false;
+ }
+ if (upper != null && ivalue > (Integer)upper) {
+ return false;
+ }
+ return true;
}
},
LONG {
@Override
protected boolean inRange(String value, Object lower, Object upper) {
long lvalue = Long.parseLong(value);
- return (Long)lower <= lvalue && lvalue <= (Long)upper;
+ if (lower != null && lvalue < (Long)lower) {
+ return false;
+ }
+ if (upper != null && lvalue > (Long)upper) {
+ return false;
+ }
+ return true;
}
},
FLOAT {
@Override
protected boolean inRange(String value, Object lower, Object upper) {
float fvalue = Float.parseFloat(value);
- return (Float)lower <= fvalue && fvalue <= (Float)upper;
+ if (lower != null && fvalue < (Float)lower) {
+ return false;
+ }
+ if (upper != null && fvalue > (Float)upper) {
+ return false;
+ }
+ return true;
}
};
- public static RANGE_TYPE valueOf(Object lower, Object upper) {
- if (lower instanceof Integer && upper instanceof Integer) {
- assert (Integer)lower < (Integer)upper;
+ public static TYPE valueOf(Object lower, Object upper) {
+ if (lower instanceof Integer || upper instanceof Integer) {
return INT;
- } else if (lower instanceof Long && upper instanceof Long) {
- assert (Long)lower < (Long)upper;
+ } else if (lower instanceof Long || upper instanceof Long) {
return LONG;
- } else if (lower instanceof Float && upper instanceof Float) {
- assert (Float)lower < (Float)upper;
+ } else if (lower instanceof Float || upper instanceof Float) {
return FLOAT;
}
throw new IllegalArgumentException("invalid range from " + lower + " to " + upper);
@@ -90,15 +119,15 @@ public interface Validator {
protected abstract boolean inRange(String value, Object lower, Object upper);
}
- static class RangeValidator implements Validator {
+ class RangeValidator implements Validator {
- private final RANGE_TYPE type;
+ private final TYPE type;
private final Object lower, upper;
public RangeValidator(Object lower, Object upper) {
this.lower = lower;
this.upper = upper;
- this.type = RANGE_TYPE.valueOf(lower, upper);
+ this.type = TYPE.valueOf(lower, upper);
}
@Override
@@ -115,9 +144,23 @@ public interface Validator {
}
return null;
}
+
+ @Override
+ public String toDescription() {
+ if (lower == null && upper == null) {
+ return null;
+ }
+ if (lower != null && upper != null) {
+ return "Expects value between " + lower + " and " + upper;
+ }
+ if (lower != null) {
+ return "Expects value bigger than " + lower;
+ }
+ return "Expects value smaller than " + upper;
+ }
}
- static class PatternSet implements Validator {
+ class PatternSet implements Validator {
private final List<Pattern> expected = new ArrayList<Pattern>();
@@ -139,15 +182,20 @@ public interface Validator {
}
return "Invalid value.. expects one of patterns " + expected;
}
+
+ @Override
+ public String toDescription() {
+ return "Expects one of the pattern in " + expected;
+ }
}
- static class RatioValidator implements Validator {
+ class RatioValidator implements Validator {
@Override
public String validate(String value) {
try {
float fvalue = Float.valueOf(value);
- if (fvalue <= 0 || fvalue >= 1) {
+ if (fvalue < 0 || fvalue > 1) {
return "Invalid ratio " + value + ", which should be in between 0 to 1";
}
} catch (NumberFormatException e) {
@@ -155,5 +203,77 @@ public interface Validator {
}
return null;
}
+
+ @Override
+ public String toDescription() {
+ return "Expects value between 0.0f and 1.0f";
+ }
+ }
+
+ class TimeValidator implements Validator {
+
+ private final TimeUnit timeUnit;
+
+ private final Long min;
+ private final boolean minInclusive;
+
+ private final Long max;
+ private final boolean maxInclusive;
+
+ public TimeValidator(TimeUnit timeUnit) {
+ this(timeUnit, null, false, null, false);
+ }
+
+ public TimeValidator(TimeUnit timeUnit,
+ Long min, boolean minInclusive, Long max, boolean maxInclusive) {
+ this.timeUnit = timeUnit;
+ this.min = min;
+ this.minInclusive = minInclusive;
+ this.max = max;
+ this.maxInclusive = maxInclusive;
+ }
+
+ public TimeUnit getTimeUnit() {
+ return timeUnit;
+ }
+
+ @Override
+ public String validate(String value) {
+ try {
+ long time = HiveConf.toTime(value, timeUnit, timeUnit);
+ if (min != null && (minInclusive ? time < min : time <= min)) {
+ return value + " is smaller than " + timeString(min);
+ }
+ if (max != null && (maxInclusive ? time > max : time >= max)) {
+ return value + " is bigger than " + timeString(max);
+ }
+ } catch (Exception e) {
+ return e.toString();
+ }
+ return null;
+ }
+
+ public String toDescription() {
+ String description =
+ "Expects a time value with unit " +
+ "(d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec)" +
+ ", which is " + HiveConf.stringFor(timeUnit) + " if not specified";
+ if (min != null && max != null) {
+ description += ".\nThe time should be in between " +
+ timeString(min) + (minInclusive ? " (inclusive)" : " (exclusive)") + " and " +
+ timeString(max) + (maxInclusive ? " (inclusive)" : " (exclusive)");
+ } else if (min != null) {
+ description += ".\nThe time should be bigger than " +
+ (minInclusive ? "or equal to " : "") + timeString(min);
+ } else if (max != null) {
+ description += ".\nThe time should be smaller than " +
+ (maxInclusive ? "or equal to " : "") + timeString(max);
+ }
+ return description;
+ }
+
+ private String timeString(long time) {
+ return time + " " + HiveConf.stringFor(timeUnit);
+ }
}
}
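[For the bounded TimeValidator added above, a short sketch of the validate() behavior, shaped like the hive.server2.session.check.interval validator (3000 ms inclusive minimum, no upper bound); the inputs are illustrative only:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hive.conf.Validator.TimeValidator;

    public class TimeValidatorSketch {
      public static void main(String[] args) {
        TimeValidator v =
            new TimeValidator(TimeUnit.MILLISECONDS, 3000L, true, null, false);

        System.out.println(v.validate("5s"));      // null: 5000 ms passes the 3000 ms minimum
        System.out.println(v.validate("1000ms"));  // "1000ms is smaller than 3000 msec"
        System.out.println(v.validate("junk"));    // NumberFormatException text from toTime()
      }
    }
]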
Modified: hive/branches/tez/contrib/src/test/results/clientnegative/serde_regex.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/src/test/results/clientnegative/serde_regex.q.out?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/contrib/src/test/results/clientnegative/serde_regex.q.out (original)
+++ hive/branches/tez/contrib/src/test/results/clientnegative/serde_regex.q.out Tue Sep 2 19:56:56 2014
@@ -56,7 +56,7 @@ STAGE PLANS:
serde properties:
input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))?
output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s
- name: serde_regex
+ name: default.serde_regex
PREHOOK: query: CREATE TABLE serde_regex(
host STRING,
Modified: hive/branches/tez/contrib/src/test/results/clientpositive/fileformat_base64.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/src/test/results/clientpositive/fileformat_base64.q.out?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/contrib/src/test/results/clientpositive/fileformat_base64.q.out (original)
+++ hive/branches/tez/contrib/src/test/results/clientpositive/fileformat_base64.q.out Tue Sep 2 19:56:56 2014
@@ -22,7 +22,7 @@ STAGE PLANS:
columns: key int, value string
input format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat
output format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat
- name: base64_test
+ name: default.base64_test
PREHOOK: query: CREATE TABLE base64_test(key INT, value STRING) STORED AS
INPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat'
Modified: hive/branches/tez/contrib/src/test/results/clientpositive/serde_regex.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/contrib/src/test/results/clientpositive/serde_regex.q.out?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/contrib/src/test/results/clientpositive/serde_regex.q.out (original)
+++ hive/branches/tez/contrib/src/test/results/clientpositive/serde_regex.q.out Tue Sep 2 19:56:56 2014
@@ -48,7 +48,7 @@ STAGE PLANS:
serde properties:
input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))?
output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s
- name: serde_regex
+ name: default.serde_regex
PREHOOK: query: CREATE TABLE serde_regex(
host STRING,
Modified: hive/branches/tez/data/files/parquet_types.txt
URL: http://svn.apache.org/viewvc/hive/branches/tez/data/files/parquet_types.txt?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/data/files/parquet_types.txt (original)
+++ hive/branches/tez/data/files/parquet_types.txt Tue Sep 2 19:56:56 2014
@@ -1,21 +1,21 @@
-100|1|1|1.0|0.0|abc|2011-01-01 01:01:01.111111111
-101|2|2|1.1|0.3|def|2012-02-02 02:02:02.222222222
-102|3|3|1.2|0.6|ghi|2013-03-03 03:03:03.333333333
-103|1|4|1.3|0.9|jkl|2014-04-04 04:04:04.444444444
-104|2|5|1.4|1.2|mno|2015-05-05 05:05:05.555555555
-105|3|1|1.0|1.5|pqr|2016-06-06 06:06:06.666666666
-106|1|2|1.1|1.8|stu|2017-07-07 07:07:07.777777777
-107|2|3|1.2|2.1|vwx|2018-08-08 08:08:08.888888888
-108|3|4|1.3|2.4|yza|2019-09-09 09:09:09.999999999
-109|1|5|1.4|2.7|bcd|2020-10-10 10:10:10.101010101
-110|2|1|1.0|3.0|efg|2021-11-11 11:11:11.111111111
-111|3|2|1.1|3.3|hij|2022-12-12 12:12:12.121212121
-112|1|3|1.2|3.6|klm|2023-01-02 13:13:13.131313131
-113|2|4|1.3|3.9|nop|2024-02-02 14:14:14.141414141
-114|3|5|1.4|4.2|qrs|2025-03-03 15:15:15.151515151
-115|1|1|1.0|4.5|tuv|2026-04-04 16:16:16.161616161
-116|2|2|1.1|4.8|wxy|2027-05-05 17:17:17.171717171
-117|3|3|1.2|5.1|zab|2028-06-06 18:18:18.181818181
-118|1|4|1.3|5.4|cde|2029-07-07 19:19:19.191919191
-119|2|5|1.4|5.7|fgh|2030-08-08 20:20:20.202020202
-120|3|1|1.0|6.0|ijk|2031-09-09 21:21:21.212121212
+100|1|1|1.0|0.0|abc|2011-01-01 01:01:01.111111111|a |a
+101|2|2|1.1|0.3|def|2012-02-02 02:02:02.222222222|ab |ab
+102|3|3|1.2|0.6|ghi|2013-03-03 03:03:03.333333333|abc|abc
+103|1|4|1.3|0.9|jkl|2014-04-04 04:04:04.444444444|abcd|abcd
+104|2|5|1.4|1.2|mno|2015-05-05 05:05:05.555555555|abcde|abcde
+105|3|1|1.0|1.5|pqr|2016-06-06 06:06:06.666666666|abcdef|abcdef
+106|1|2|1.1|1.8|stu|2017-07-07 07:07:07.777777777|abcdefg|abcdefg
+107|2|3|1.2|2.1|vwx|2018-08-08 08:08:08.888888888|bcdefg|abcdefgh
+108|3|4|1.3|2.4|yza|2019-09-09 09:09:09.999999999|cdefg|abcdefghijklmnop
+109|1|5|1.4|2.7|bcd|2020-10-10 10:10:10.101010101|klmno|abcdedef
+110|2|1|1.0|3.0|efg|2021-11-11 11:11:11.111111111|pqrst|abcdede
+111|3|2|1.1|3.3|hij|2022-12-12 12:12:12.121212121|nopqr|abcded
+112|1|3|1.2|3.6|klm|2023-01-02 13:13:13.131313131|opqrs|abcdd
+113|2|4|1.3|3.9|nop|2024-02-02 14:14:14.141414141|pqrst|abc
+114|3|5|1.4|4.2|qrs|2025-03-03 15:15:15.151515151|qrstu|b
+115|1|1|1.0|4.5|tuv|2026-04-04 16:16:16.161616161|rstuv|abcded
+116|2|2|1.1|4.8|wxy|2027-05-05 17:17:17.171717171|stuvw|abcded
+117|3|3|1.2|5.1|zab|2028-06-06 18:18:18.181818181|tuvwx|abcded
+118|1|4|1.3|5.4|cde|2029-07-07 19:19:19.191919191|uvwzy|abcdede
+119|2|5|1.4|5.7|fgh|2030-08-08 20:20:20.202020202|vwxyz|abcdede
+120|3|1|1.0|6.0|ijk|2031-09-09 21:21:21.212121212|wxyza|abcde
Modified: hive/branches/tez/hbase-handler/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/tez/hbase-handler/pom.xml?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hbase-handler/pom.xml (original)
+++ hive/branches/tez/hbase-handler/pom.xml Tue Sep 2 19:56:56 2014
@@ -36,34 +36,9 @@
<!-- intra-project -->
<dependency>
<groupId>org.apache.hive</groupId>
- <artifactId>hive-common</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-metastore</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-serde</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-service</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
<artifactId>hive-exec</artifactId>
<version>${project.version}</version>
</dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-shims</artifactId>
- <version>${project.version}</version>
- </dependency>
<!-- inter-project -->
<dependency>
<groupId>commons-lang</groupId>
Propchange: hive/branches/tez/hbase-handler/pom.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Sep 2 19:56:56 2014
@@ -0,0 +1,5 @@
+/hive/branches/branch-0.11/hbase-handler/pom.xml:1480385,1480458,1481120,1481344,1481346,1481348,1481352,1483872,1505184
+/hive/branches/spark/hbase-handler/pom.xml:1608589-1621357
+/hive/branches/tez/hbase-handler/pom.xml:1494760-1538879
+/hive/branches/vectorization/hbase-handler/pom.xml:1466908-1527856
+/hive/trunk/hbase-handler/pom.xml:1494760-1622080
Modified: hive/branches/tez/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out
URL: http://svn.apache.org/viewvc/hive/branches/tez/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out (original)
+++ hive/branches/tez/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out Tue Sep 2 19:56:56 2014
@@ -20,7 +20,7 @@ WITH SERDEPROPERTIES ("hbase.columns.map
TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:hbasedb
-PREHOOK: Output: hbaseDB@hbaseDB.hbase_table_0
+PREHOOK: Output: hbaseDB@hbase_table_0
POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
-- Hadoop 0.23 changes the behavior FsShell on Exit Codes
-- In Hadoop 0.20
@@ -37,7 +37,6 @@ WITH SERDEPROPERTIES ("hbase.columns.map
TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:hbasedb
-POSTHOOK: Output: hbaseDB@hbaseDB.hbase_table_0
POSTHOOK: Output: hbaseDB@hbase_table_0
Found 3 items
#### A masked pattern was here ####
Modified: hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java (original)
+++ hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java Tue Sep 2 19:56:56 2014
@@ -86,7 +86,7 @@ final class CreateTableHook extends HCat
"Operation not supported. Create table as " +
"Select is not a valid operation.");
- case HiveParser.TOK_TABLEBUCKETS:
+ case HiveParser.TOK_ALTERTABLE_BUCKETS:
break;
case HiveParser.TOK_LIKETABLE:
Modified: hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java (original)
+++ hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java Tue Sep 2 19:56:56 2014
@@ -71,7 +71,7 @@ public class HCatSemanticAnalyzer extend
hook = new CreateDatabaseHook();
return hook.preAnalyze(context, ast);
- case HiveParser.TOK_ALTERTABLE_PARTITION:
+ case HiveParser.TOK_ALTERTABLE:
if (((ASTNode) ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
return ast;
} else if (((ASTNode) ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) {
@@ -163,7 +163,6 @@ public class HCatSemanticAnalyzer extend
case HiveParser.TOK_CREATETABLE:
case HiveParser.TOK_CREATEDATABASE:
- case HiveParser.TOK_ALTERTABLE_PARTITION:
// HCat will allow these operations to be performed.
// Database DDL
@@ -178,12 +177,20 @@ public class HCatSemanticAnalyzer extend
case HiveParser.TOK_CREATEINDEX:
case HiveParser.TOK_DROPINDEX:
case HiveParser.TOK_SHOWINDEXES:
+ break;
// View DDL
//case HiveParser.TOK_ALTERVIEW_ADDPARTS:
- case HiveParser.TOK_ALTERVIEW_DROPPARTS:
- case HiveParser.TOK_ALTERVIEW_PROPERTIES:
- case HiveParser.TOK_ALTERVIEW_RENAME:
+ case HiveParser.TOK_ALTERVIEW:
+ switch (ast.getChild(1).getType()) {
+ case HiveParser.TOK_ALTERVIEW_ADDPARTS:
+ case HiveParser.TOK_ALTERVIEW_DROPPARTS:
+ case HiveParser.TOK_ALTERVIEW_RENAME:
+ case HiveParser.TOK_ALTERVIEW_PROPERTIES:
+ case HiveParser.TOK_ALTERVIEW_DROPPROPERTIES:
+ }
+ break;
+
case HiveParser.TOK_CREATEVIEW:
case HiveParser.TOK_DROPVIEW:
@@ -205,20 +212,39 @@ public class HCatSemanticAnalyzer extend
case HiveParser.TOK_DESCFUNCTION:
case HiveParser.TOK_SHOWFUNCTIONS:
case HiveParser.TOK_EXPLAIN:
+ break;
// Table DDL
- case HiveParser.TOK_ALTERTABLE_ADDPARTS:
- case HiveParser.TOK_ALTERTABLE_ADDCOLS:
- case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
- case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
- case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
- case HiveParser.TOK_ALTERTABLE_DROPPARTS:
- case HiveParser.TOK_ALTERTABLE_PROPERTIES:
- case HiveParser.TOK_ALTERTABLE_RENAME:
- case HiveParser.TOK_ALTERTABLE_RENAMECOL:
- case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
- case HiveParser.TOK_ALTERTABLE_SERIALIZER:
- case HiveParser.TOK_ALTERTABLE_TOUCH:
+ case HiveParser.TOK_ALTERTABLE:
+ switch (ast.getChild(1).getType()) {
+ case HiveParser.TOK_ALTERTABLE_ADDPARTS:
+ case HiveParser.TOK_ALTERTABLE_ADDCOLS:
+ case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
+ case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
+ case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
+ case HiveParser.TOK_ALTERTABLE_DROPPARTS:
+ case HiveParser.TOK_ALTERTABLE_PROPERTIES:
+ case HiveParser.TOK_ALTERTABLE_DROPPROPERTIES:
+ case HiveParser.TOK_ALTERTABLE_RENAME:
+ case HiveParser.TOK_ALTERTABLE_RENAMECOL:
+ case HiveParser.TOK_ALTERTABLE_REPLACECOLS:
+ case HiveParser.TOK_ALTERTABLE_SERIALIZER:
+ case HiveParser.TOK_ALTERTABLE_TOUCH:
+ case HiveParser.TOK_ALTERTABLE_ARCHIVE:
+ case HiveParser.TOK_ALTERTABLE_UNARCHIVE:
+ case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION:
+ case HiveParser.TOK_ALTERTABLE_SKEWED:
+ case HiveParser.TOK_ALTERTABLE_FILEFORMAT:
+ case HiveParser.TOK_ALTERTABLE_PROTECTMODE:
+ case HiveParser.TOK_ALTERTABLE_LOCATION:
+ case HiveParser.TOK_ALTERTABLE_MERGEFILES:
+ case HiveParser.TOK_ALTERTABLE_RENAMEPART:
+ case HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION:
+ case HiveParser.TOK_ALTERTABLE_BUCKETS:
+ case HiveParser.TOK_ALTERTABLE_COMPACT:
+ }
+ break;
+
case HiveParser.TOK_DESCTABLE:
case HiveParser.TOK_DROPTABLE:
case HiveParser.TOK_SHOW_TABLESTATUS:
Modified: hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/DataType.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/DataType.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/DataType.java (original)
+++ hive/branches/tez/hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/DataType.java Tue Sep 2 19:56:56 2014
@@ -224,7 +224,7 @@ public abstract class DataType {
if (o1[i] == o2[i]) {
continue;
}
- if (o1[i] > o1[i]) {
+ if (o1[i] > o2[i]) {
return 1;
} else {
return -1;
Modified: hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java (original)
+++ hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java Tue Sep 2 19:56:56 2014
@@ -23,6 +23,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import junit.framework.TestCase;
@@ -91,13 +92,11 @@ public class TestPermsGrp extends TestCa
hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://127.0.0.1:" + msPort);
hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
- hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120);
hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
- hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
- hcatConf.set(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname, "60");
+ hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 60, TimeUnit.SECONDS);
hcatConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
clientWH = new Warehouse(hcatConf);
msc = new HiveMetaStoreClient(hcatConf, null);
Modified: hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java (original)
+++ hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java Tue Sep 2 19:56:56 2014
@@ -156,7 +156,7 @@ public class TestSemanticAnalysis extend
public void testCreateTableIfNotExists() throws MetaException, TException, NoSuchObjectException, CommandNeedRetryException {
hcatDriver.run("drop table " + TBL_NAME);
- hcatDriver.run("create table junit_sem_analysis (a int) stored as RCFILE");
+ hcatDriver.run("create table " + TBL_NAME + " (a int) stored as RCFILE");
Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME);
List<FieldSchema> cols = tbl.getSd().getCols();
assertEquals(1, cols.size());
Modified: hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java (original)
+++ hive/branches/tez/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java Tue Sep 2 19:56:56 2014
@@ -25,6 +25,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
+import java.util.concurrent.TimeUnit;
import junit.framework.Assert;
@@ -116,7 +117,7 @@ public class TestHCatPartitionPublish {
+ msPort);
hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
- hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120);
+ hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS);
hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
HCatSemanticAnalyzer.class.getName());
hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
Modified: hive/branches/tez/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java (original)
+++ hive/branches/tez/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java Tue Sep 2 19:56:56 2014
@@ -107,7 +107,7 @@ public class HiveEndPoint {
public StreamingConnection newConnection(final boolean createPartIfNotExists)
throws ConnectionError, InvalidPartition, InvalidTable, PartitionCreationFailed
, ImpersonationFailed , InterruptedException {
- return newConnection(null, createPartIfNotExists, null);
+ return newConnection(createPartIfNotExists, null, null);
}
/**
@@ -126,67 +126,63 @@ public class HiveEndPoint {
public StreamingConnection newConnection(final boolean createPartIfNotExists, HiveConf conf)
throws ConnectionError, InvalidPartition, InvalidTable, PartitionCreationFailed
, ImpersonationFailed , InterruptedException {
- return newConnection(null, createPartIfNotExists, conf);
+ return newConnection(createPartIfNotExists, conf, null);
}
/**
* Acquire a new connection to MetaStore for streaming
- * @param proxyUser User on whose behalf all hdfs and hive operations will be
- * performed on this connection. Set it to null or empty string
- * to connect as user of current process without impersonation.
- * Currently this argument is not supported and must be null
* @param createPartIfNotExists If true, the partition specified in the endpoint
* will be auto created if it does not exist
+ * @param authenticatedUser UserGroupInformation object obtained from successful authentication.
+ * Uses insecure mode if this argument is null.
* @return
- * @throws ConnectionError if problem connecting
+ * @throws ConnectionError if there is a connection problem
* @throws InvalidPartition if specified partition is not valid (createPartIfNotExists = false)
- * @throws ImpersonationFailed if not able to impersonate 'proxyUser'
+ * @throws ImpersonationFailed if not able to impersonate the authenticated user
* @throws IOException if there was an I/O error when acquiring connection
* @throws PartitionCreationFailed if failed to create partition
* @throws InterruptedException
*/
- private StreamingConnection newConnection(final String proxyUser,
- final boolean createPartIfNotExists, final HiveConf conf)
+ public StreamingConnection newConnection(final boolean createPartIfNotExists, final HiveConf conf,
+ final UserGroupInformation authenticatedUser)
throws ConnectionError, InvalidPartition,
InvalidTable, PartitionCreationFailed, ImpersonationFailed , InterruptedException {
- if (proxyUser ==null || proxyUser.trim().isEmpty() ) {
- return newConnectionImpl(System.getProperty("user.name"), null, createPartIfNotExists, conf);
+
+ if (authenticatedUser == null) {
+ return newConnectionImpl(authenticatedUser, createPartIfNotExists, conf);
}
- final UserGroupInformation ugi = getUserGroupInfo(proxyUser);
+
try {
- return ugi.doAs (
- new PrivilegedExceptionAction<StreamingConnection>() {
+ return authenticatedUser.doAs (
+ new PrivilegedExceptionAction<StreamingConnection>() {
@Override
public StreamingConnection run()
throws ConnectionError, InvalidPartition, InvalidTable
, PartitionCreationFailed {
- return newConnectionImpl(proxyUser, ugi, createPartIfNotExists, conf);
+ return newConnectionImpl(authenticatedUser, createPartIfNotExists, conf);
}
- }
+ }
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed to impersonate '" + proxyUser +
- "' when acquiring connection", e);
+ throw new ConnectionError("Failed to connect as : " + authenticatedUser.getShortUserName(), e);
}
}
-
-
- private StreamingConnection newConnectionImpl(String proxyUser, UserGroupInformation ugi,
+ private StreamingConnection newConnectionImpl(UserGroupInformation ugi,
boolean createPartIfNotExists, HiveConf conf)
throws ConnectionError, InvalidPartition, InvalidTable
, PartitionCreationFailed {
- return new ConnectionImpl(this, proxyUser, ugi, conf, createPartIfNotExists);
+ return new ConnectionImpl(this, ugi, conf, createPartIfNotExists);
}
- private static UserGroupInformation getUserGroupInfo(String proxyUser)
+ private static UserGroupInformation getUserGroupInfo(String user)
throws ImpersonationFailed {
try {
return UserGroupInformation.createProxyUser(
- proxyUser, UserGroupInformation.getLoginUser());
+ user, UserGroupInformation.getLoginUser());
} catch (IOException e) {
- LOG.error("Unable to login as proxy user. Exception follows.", e);
- throw new ImpersonationFailed(proxyUser,e);
+ LOG.error("Unable to get UserGroupInfo for user : " + user, e);
+ throw new ImpersonationFailed(user,e);
}
}
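Caller-side, the reworked entry point takes an already-authenticated UGI instead of a proxy-user string. A minimal sketch under assumed values (the URI, principal, keytab path, and partition values are illustrative):

    import java.util.Arrays;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.security.UserGroupInformation;

    // Secure mode: authenticate first, then hand the UGI to newConnection.
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
        "hive/host@EXAMPLE.COM", "/etc/security/keytabs/hive.keytab");
    HiveEndPoint endPt = new HiveEndPoint("thrift://metastore:9083", "mydb", "alerts",
        Arrays.asList("2014", "09"));
    StreamingConnection conn = endPt.newConnection(true, new HiveConf(), ugi);

    // Insecure mode: a null UGI connects as the current process user, with no doAs.
    StreamingConnection plain = endPt.newConnection(true, new HiveConf(), null);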
@@ -242,14 +238,12 @@ public class HiveEndPoint {
private static class ConnectionImpl implements StreamingConnection {
private final IMetaStoreClient msClient;
private final HiveEndPoint endPt;
- private final String proxyUser;
private final UserGroupInformation ugi;
+ private final String username;
/**
- *
* @param endPoint end point to connect to
- * @param proxyUser can be null
- * @param ugi of prody user. If ugi is null, impersonation of proxy user will be disabled
+ * @param ugi UGI on whose behalf streaming is done; may be null, in which case the
+ * connection runs as the current process user without a doAs
* @param conf HiveConf object
* @param createPart create the partition if it does not exist
* @throws ConnectionError if there is trouble connecting
@@ -257,15 +251,15 @@ public class HiveEndPoint {
* @throws InvalidTable if specified table does not exist
* @throws PartitionCreationFailed if createPart=true and not able to create partition
*/
- private ConnectionImpl(HiveEndPoint endPoint, String proxyUser, UserGroupInformation ugi,
+ private ConnectionImpl(HiveEndPoint endPoint, UserGroupInformation ugi,
HiveConf conf, boolean createPart)
throws ConnectionError, InvalidPartition, InvalidTable
, PartitionCreationFailed {
- this.proxyUser = proxyUser;
this.endPt = endPoint;
this.ugi = ugi;
+ this.username = ugi==null ? System.getProperty("user.name") : ugi.getShortUserName();
if (conf==null) {
- conf = HiveEndPoint.createHiveConf(this.getClass(),endPoint.metaStoreUri);
+ conf = HiveEndPoint.createHiveConf(this.getClass(), endPoint.metaStoreUri);
}
this.msClient = getMetaStoreClient(endPoint, conf);
if (createPart && !endPoint.partitionVals.isEmpty()) {
@@ -324,21 +318,21 @@ public class HiveEndPoint {
return ugi.doAs (
new PrivilegedExceptionAction<TransactionBatch>() {
@Override
- public TransactionBatch run() throws StreamingException {
+ public TransactionBatch run() throws StreamingException, InterruptedException {
return fetchTransactionBatchImpl(numTransactions, recordWriter);
}
}
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
- "' when acquiring Transaction Batch on endPoint " + endPt, e);
+ throw new ImpersonationFailed("Failed to fetch Txn Batch as user '" + ugi.getShortUserName()
+ + "' when acquiring Transaction Batch on endPoint " + endPt, e);
}
}
private TransactionBatch fetchTransactionBatchImpl(int numTransactions,
RecordWriter recordWriter)
- throws StreamingException, TransactionBatchUnAvailable {
- return new TransactionBatchImpl(proxyUser, ugi, endPt, numTransactions, msClient
+ throws StreamingException, TransactionBatchUnAvailable, InterruptedException {
+ return new TransactionBatchImpl(username, ugi, endPt, numTransactions, msClient
, recordWriter);
}
@@ -445,7 +439,7 @@ public class HiveEndPoint {
} // class ConnectionImpl
private static class TransactionBatchImpl implements TransactionBatch {
- private final String proxyUser;
+ private final String username;
private final UserGroupInformation ugi;
private final HiveEndPoint endPt;
private final IMetaStoreClient msClient;
@@ -461,7 +455,7 @@ public class HiveEndPoint {
/**
* Represents a batch of transactions acquired from MetaStore
*
- * @param proxyUser
+ * @param user
* @param ugi
* @param endPt
* @param numTxns
@@ -470,9 +464,9 @@ public class HiveEndPoint {
* @throws StreamingException if failed to create new RecordUpdater for batch
* @throws TransactionBatchUnAvailable if failed to acquire a new Transaction batch
*/
- private TransactionBatchImpl(String proxyUser, UserGroupInformation ugi, HiveEndPoint endPt
- , int numTxns, IMetaStoreClient msClient, RecordWriter recordWriter)
- throws StreamingException, TransactionBatchUnAvailable {
+ private TransactionBatchImpl(final String user, UserGroupInformation ugi, HiveEndPoint endPt
+ , final int numTxns, final IMetaStoreClient msClient, RecordWriter recordWriter)
+ throws StreamingException, TransactionBatchUnAvailable, InterruptedException {
try {
if ( endPt.partitionVals!=null && !endPt.partitionVals.isEmpty() ) {
Table tableObj = msClient.getTable(endPt.database, endPt.table);
@@ -481,20 +475,38 @@ public class HiveEndPoint {
} else {
partNameForLock = null;
}
- this.proxyUser = proxyUser;
+ this.username = user;
this.ugi = ugi;
this.endPt = endPt;
this.msClient = msClient;
this.recordWriter = recordWriter;
- this.txnIds = msClient.openTxns(proxyUser, numTxns).getTxn_ids();
+
+ txnIds = openTxnImpl(msClient, user, numTxns, ugi);
+
this.currentTxnIndex = -1;
this.state = TxnState.INACTIVE;
recordWriter.newBatch(txnIds.get(0), txnIds.get(txnIds.size()-1));
} catch (TException e) {
throw new TransactionBatchUnAvailable(endPt, e);
+ } catch (IOException e) {
+ throw new TransactionBatchUnAvailable(endPt, e);
}
}
+ private List<Long> openTxnImpl(final IMetaStoreClient msClient, final String user,
+ final int numTxns, UserGroupInformation ugi)
+ throws IOException, TException, InterruptedException {
+ if (ugi == null) {
+ return msClient.openTxns(user, numTxns).getTxn_ids();
+ }
+ return (List<Long>) ugi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ return msClient.openTxns(user, numTxns).getTxn_ids();
+ }
+ });
+ }
+
@Override
public String toString() {
if (txnIds==null || txnIds.isEmpty()) {
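openTxnImpl above only wraps the metastore call in a doAs when a UGI is present. The unchecked cast to List<Long> could also be dropped by typing the privileged action directly; an equivalent, cast-free sketch (a suggestion, not what the patch does):

    private List<Long> openTxnImpl(final IMetaStoreClient msClient, final String user,
        final int numTxns, UserGroupInformation ugi)
        throws IOException, TException, InterruptedException {
      if (ugi == null) {
        return msClient.openTxns(user, numTxns).getTxn_ids();
      }
      // Typing the action as List<Long> lets doAs return the correct type directly.
      return ugi.doAs(new PrivilegedExceptionAction<List<Long>>() {
        @Override
        public List<Long> run() throws Exception {
          return msClient.openTxns(user, numTxns).getTxn_ids();
        }
      });
    }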
@@ -526,8 +538,8 @@ public class HiveEndPoint {
}
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed impersonating proxyUser '" + proxyUser +
- "' when switch to next Transaction for endPoint :" + endPt, e);
+ throw new ImpersonationFailed("Failed switching to next Txn as user '" + username +
+ "' in Txn batch :" + this, e);
}
}
@@ -536,7 +548,7 @@ public class HiveEndPoint {
throw new InvalidTrasactionState("No more transactions available in" +
" current batch for end point : " + endPt);
++currentTxnIndex;
- lockRequest = createLockRequest(endPt, partNameForLock, proxyUser, getCurrentTxnId());
+ lockRequest = createLockRequest(endPt, partNameForLock, username, getCurrentTxnId());
try {
LockResponse res = msClient.lock(lockRequest);
if (res.getState() != LockState.ACQUIRED) {
@@ -608,8 +620,8 @@ public class HiveEndPoint {
}
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
- "' when writing to endPoint :" + endPt + ". Transaction Id: "
+ throw new ImpersonationFailed("Failed wirting as user '" + username +
+ "' to endPoint :" + endPt + ". Transaction Id: "
+ getCurrentTxnId(), e);
}
}
@@ -641,8 +653,8 @@ public class HiveEndPoint {
}
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed impersonating proxyUser '" + proxyUser +
- "' when writing to endPoint :" + endPt + ". Transaction Id: "
+ throw new ImpersonationFailed("Failed writing as user '" + username +
+ "' to endPoint :" + endPt + ". Transaction Id: "
+ getCurrentTxnId(), e);
}
}
@@ -680,9 +692,8 @@ public class HiveEndPoint {
}
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
- "' when committing Txn on endPoint :" + endPt + ". Transaction Id: "
- + getCurrentTxnId(), e);
+ throw new ImpersonationFailed("Failed committing Txn ID " + getCurrentTxnId() + " as user '"
+ + username + "'on endPoint :" + endPt + ". Transaction Id: ", e);
}
}
@@ -726,9 +737,8 @@ public class HiveEndPoint {
}
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
- "' when aborting Txn on endPoint :" + endPt + ". Transaction Id: "
- + getCurrentTxnId(), e);
+ throw new ImpersonationFailed("Failed aborting Txn " + getCurrentTxnId() + " as user '"
+ + username + "' on endPoint :" + endPt, e);
}
}
@@ -784,8 +794,8 @@ public class HiveEndPoint {
}
);
} catch (IOException e) {
- throw new ImpersonationFailed("Failed impersonating proxy user '" + proxyUser +
- "' when closing Txn Batch on endPoint :" + endPt, e);
+ throw new ImpersonationFailed("Failed closing Txn Batch as user '" + username +
+ "' on endPoint :" + endPt, e);
}
}
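Taken together, the lifecycle against the reworked streaming API looks like the following sketch (error handling elided; DelimitedInputWriter is the stock writer in this package, and the endPt/conf/ugi values are carried over from the connection sketch above):

    StreamingConnection conn = endPt.newConnection(true, conf, ugi);
    DelimitedInputWriter writer =
        new DelimitedInputWriter(new String[]{"id", "msg"}, ",", endPt);
    TransactionBatch batch = conn.fetchTransactionBatch(10, writer); // 10 txns per batch
    while (batch.remainingTransactions() > 0) {
      batch.beginNextTransaction();
      batch.write("1,hello".getBytes());
      batch.commit(); // or batch.abort() on failure; both now report 'username' in errors
    }
    batch.close();
    conn.close();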
Modified: hive/branches/tez/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java (original)
+++ hive/branches/tez/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java Tue Sep 2 19:56:56 2014
@@ -51,6 +51,7 @@ public class HCatPartition {
private int createTime;
private int lastAccessTime;
private StorageDescriptor sd;
+ private List<HCatFieldSchema> columns; // Cache column-list from this.sd.
private Map<String, String> parameters;
// For use from within HCatClient.getPartitions().
@@ -68,6 +69,7 @@ public class HCatPartition {
}
this.sd = partition.getSd();
+ this.columns = getColumns(this.sd);
}
// For constructing HCatPartitions afresh, as an argument to HCatClient.addPartitions().
@@ -77,6 +79,7 @@ public class HCatPartition {
this.dbName = hcatTable.getDbName();
this.sd = new StorageDescriptor(hcatTable.getSd());
this.sd.setLocation(location);
+ this.columns = getColumns(this.sd);
this.createTime = (int)(System.currentTimeMillis()/1000);
this.lastAccessTime = -1;
this.values = new ArrayList<String>(hcatTable.getPartCols().size());
@@ -98,7 +101,7 @@ public class HCatPartition {
this.dbName = rhs.dbName;
this.sd = new StorageDescriptor(rhs.sd);
this.sd.setLocation(location);
-
+ this.columns = getColumns(this.sd);
this.createTime = (int) (System.currentTimeMillis() / 1000);
this.lastAccessTime = -1;
this.values = new ArrayList<String>(hcatTable.getPartCols().size());
@@ -112,6 +115,14 @@ public class HCatPartition {
}
}
+ private static List<HCatFieldSchema> getColumns(StorageDescriptor sd) throws HCatException {
+ ArrayList<HCatFieldSchema> columns = new ArrayList<HCatFieldSchema>(sd.getColsSize());
+ for (FieldSchema fieldSchema : sd.getCols()) {
+ columns.add(HCatSchemaUtils.getHCatFieldSchema(fieldSchema));
+ }
+ return columns;
+ }
+
// For use from HCatClient.addPartitions(), to construct from user-input.
Partition toHivePartition() throws HCatException {
Partition hivePtn = new Partition();
@@ -172,11 +183,7 @@ public class HCatPartition {
*
* @return the columns
*/
- public List<HCatFieldSchema> getColumns() throws HCatException {
- ArrayList<HCatFieldSchema> columns = new ArrayList<HCatFieldSchema>(sd.getColsSize());
- for (FieldSchema fieldSchema : sd.getCols()) {
- columns.add(HCatSchemaUtils.getHCatFieldSchema(fieldSchema));
- }
+ public List<HCatFieldSchema> getColumns() {
return columns;
}
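With the change above, the FieldSchema-to-HCatFieldSchema conversion runs once at construction, so getColumns() becomes a plain getter and no longer declares HCatException. The caller-visible effect, sketched with a hypothetical helper:

    // getColumns() now returns the list cached when the partition object was
    // built, rather than re-converting sd.getCols() on every call.
    static void printColumns(HCatPartition partition) {
      for (HCatFieldSchema column : partition.getColumns()) {
        System.out.println(column.getName() + ": " + column.getTypeString());
      }
    }

One nuance of the eager build: later mutations of the underlying StorageDescriptor are not reflected in subsequent getColumns() calls.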
Modified: hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java (original)
+++ hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java Tue Sep 2 19:56:56 2014
@@ -653,7 +653,7 @@ public class Server {
verifyParam(inputs, "input");
verifyParam(mapper, "mapper");
verifyParam(reducer, "reducer");
-
+
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
userArgs.put("input", inputs);
@@ -680,8 +680,8 @@ public class Server {
/**
* Run a MapReduce Jar job.
* Params correspond to the REST api params
- * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access
- * metastore, which requires additional steps for WebHCat to perform in a secure cluster.
+ * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access
+ * metastore, which requires additional steps for WebHCat to perform in a secure cluster.
* @param callback URL which WebHCat will call when the hive job finishes
* @see org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob
*/
@@ -703,7 +703,7 @@ public class Server {
verifyUser();
verifyParam(jar, "jar");
verifyParam(mainClass, "class");
-
+
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
userArgs.put("jar", jar);
@@ -729,7 +729,7 @@ public class Server {
* Run a Pig job.
* Params correspond to the REST api params. If '-useHCatalog' is in the {@code pigArgs},
* {@code usesHcatalog} is interpreted as true.
- * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access
+ * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access
* metastore, which requires additional steps for WebHCat to perform in a secure cluster.
* This does nothing to ensure that Pig is installed on target node in the cluster.
* @param callback URL which WebHCat will call when the hive job finishes
@@ -752,7 +752,7 @@ public class Server {
if (execute == null && srcFile == null) {
throw new BadParam("Either execute or file parameter required");
}
-
+
//add all function arguments to a map
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
@@ -819,7 +819,7 @@ public class Server {
* @param execute SQL statement to run, equivalent to "-e" from hive command line
* @param srcFile name of hive script file to run, equivalent to "-f" from hive
* command line
- * @param hiveArgs additional command line argument passed to the hive command line.
+ * @param hiveArgs additional command line argument passed to the hive command line.
* Please check https://cwiki.apache.org/Hive/languagemanual-cli.html
* for detailed explanation of command line arguments
* @param otherFiles additional files to be shipped to the launcher, such as the jars
@@ -846,7 +846,7 @@ public class Server {
if (execute == null && srcFile == null) {
throw new BadParam("Either execute or file parameter required");
}
-
+
//add all function arguments to a map
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
@@ -903,42 +903,42 @@ public class Server {
* Example usages:
* 1. curl -s 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan'
* Return all the Job IDs submitted by hsubramaniyan
- * 2. curl -s
+ * 2. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&showall=true'
* Return all the Job IDs that are visible to hsubramaniyan
* 3. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&jobid=job_201312091733_0003'
* Return all the Job IDs for hsubramaniyan after job_201312091733_0003.
- * 4. curl -s 'http://localhost:50111/templeton/v1/jobs?
+ * 4. curl -s 'http://localhost:50111/templeton/v1/jobs?
* user.name=hsubramaniyan&jobid=job_201312091733_0003&numrecords=5'
- * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
- * 5. curl -s
+ * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
+ * 5. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&numrecords=5'
- * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after sorting the Job ID list
+ * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after sorting the Job ID list
* lexicographically.
* </p>
* <p>
* Supporting pagination using "jobid" and "numrecords" parameters:
* Step 1: Get the start "jobid" = job_xxx_000, "numrecords" = n
- * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid"
- * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's
+ * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid"
+ * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's
* last record and get the Job Id of the last record as job_yyy_k, else quit.
* Step 4: set "jobid"=job_yyy_k and go to step 2.
- * </p>
+ * </p>
* @param fields If "fields" set to "*", the request will return full details of the job.
* If "fields" is missing, will only return the job ID. Currently the value can only
* be "*", other values are not allowed and will throw exception.
* @param showall If "showall" is set to "true", the request will return all jobs the user
* has permission to view, not only the jobs belonging to the user.
- * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater
- * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001",
- * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of
+ * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater
+ * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001",
+ * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of
* records returned depends on the value of "numrecords".
- * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords
- * records appearing after "jobid" will be returned after sorting the Job Id list
- * lexicographically.
- * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will
- * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present
+ * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords
+ * records appearing after "jobid" will be returned after sorting the Job Id list
+ * lexicographically.
+ * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will
+ * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present
* and "numrecords" is missing, all the records whose Job Id is greater than "jobid" are returned.
* @return list of job items based on the filter conditions specified by the user.
*/
@@ -950,7 +950,7 @@ public class Server {
@QueryParam("jobid") String jobid,
@QueryParam("numrecords") String numrecords)
throws NotAuthorizedException, BadParam, IOException, InterruptedException {
-
+
verifyUser();
boolean showDetails = false;
@@ -971,9 +971,9 @@ public class Server {
try {
if (numrecords != null) {
numRecords = Integer.parseInt(numrecords);
- if (numRecords <= 0) {
- throw new BadParam("numrecords should be an integer > 0");
- }
+ if (numRecords <= 0) {
+ throw new BadParam("numrecords should be an integer > 0");
+ }
}
else {
numRecords = -1;
@@ -983,18 +983,18 @@ public class Server {
throw new BadParam("Invalid numrecords format: numrecords should be an integer > 0");
}
- // Sort the list lexicographically
+ // Sort the list lexicographically
Collections.sort(list);
for (String job : list) {
// If numRecords = -1, fetch all records.
// Hence skip all the below checks when numRecords = -1.
if (numRecords != -1) {
- // If currRecord >= numRecords, we have already fetched the top #numRecords
+ // If currRecord >= numRecords, we have already fetched the top #numRecords
if (currRecord >= numRecords) {
break;
- }
- // If the current record needs to be returned based on the
+ }
+ // If the current record needs to be returned based on the
// filter conditions specified by the user, increment the counter
else if ((jobid != null && job.compareTo(jobid) > 0) || jobid == null) {
currRecord++;
@@ -1101,7 +1101,7 @@ public class Server {
* value of user.name query param, in kerberos mode it's the kinit'ed user.
*/
private String getRequestingUser() {
- if (theSecurityContext == null) {
+ if (theSecurityContext == null) {
return null;
}
String userName = null;
@@ -1114,7 +1114,7 @@ public class Server {
if(userName == null) {
return null;
}
- //map hue/foo.bar@something.com->hue since user group checks
+ //map hue/foo.bar@something.com->hue since user group checks
// and config files are in terms of short name
return UserGroupInformation.createRemoteUser(userName).getShortUserName();
}
@@ -1161,7 +1161,7 @@ public class Server {
return unkHost;
}
}
-
+
private void checkEnableLogPrerequisite(boolean enablelog, String statusdir) throws BadParam {
if (enablelog && !TempletonUtils.isset(statusdir))
throw new BadParam("enablelog is only applicable when statusdir is set");
Modified: hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java
URL: http://svn.apache.org/viewvc/hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java?rev=1622108&r1=1622107&r2=1622108&view=diff
==============================================================================
--- hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java (original)
+++ hive/branches/tez/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java Tue Sep 2 19:56:56 2014
@@ -169,9 +169,9 @@ public class JobState {
String childJobIDs = getField("children");
if (childJobIDs != null) {
for (String jobid : childJobIDs.split(",")) {
- children.add(new JobState(jobid, config));
+ children.add(new JobState(jobid, config));
}
- }
+ }
return children;
}