Posted to commits@griffin.apache.org by gu...@apache.org on 2017/05/26 09:17:33 UTC

[3/9] incubator-griffin git commit: [GRIFFIN-19] update document of docker

http://git-wip-us.apache.org/repos/asf/incubator-griffin/blob/2de6a549/docker/griffin_env/conf/hive/hive-site.xml.template
----------------------------------------------------------------------
diff --git a/docker/griffin_env/conf/hive/hive-site.xml.template b/docker/griffin_env/conf/hive/hive-site.xml.template
new file mode 100644
index 0000000..f641dee
--- /dev/null
+++ b/docker/griffin_env/conf/hive/hive-site.xml.template
@@ -0,0 +1,3911 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--><configuration>
+  <!-- WARNING!!! This file is auto generated for documentation purposes ONLY! -->
+  <!-- WARNING!!! Any changes you make to this file will be ignored by Hive.   -->
+  <!-- WARNING!!! You must make your changes in hive-site.xml instead.         -->
+  <!-- Hive Execution Parameters -->
+  <property>
+    <name>hive.exec.script.wrapper</name>
+    <value/>
+    <description/>
+  </property>
+  <property>
+    <name>hive.exec.plan</name>
+    <value/>
+    <description/>
+  </property>
+  <property>
+    <name>hive.plan.serialization.format</name>
+    <value>kryo</value>
+    <description>
+      Query plan format serialization between client and task nodes. 
+      Two supported values are: kryo and javaXML. Kryo is the default.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.stagingdir</name>
+    <value>.hive-staging</value>
+    <description>Directory name that will be created inside table locations in order to support HDFS encryption. This replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans.</description>
+  </property>
+  <property>
+    <name>hive.exec.scratchdir</name>
+    <value>/tmp/hive-${user.name}</value>
+    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
+  </property>
+  <property>
+    <name>hive.exec.local.scratchdir</name>
+    <value>/tmp/${user.name}</value>
+    <description>Local scratch space for Hive jobs</description>
+  </property>
+  <property>
+    <name>hive.downloaded.resources.dir</name>
+    <value>/tmp/${user.name}_resources</value>
+    <description>Temporary local directory for added resources in the remote file system.</description>
+  </property>
+  <property>
+    <name>hive.scratch.dir.permission</name>
+    <value>733</value>
+    <description>The permission for the user specific scratch directories that get created.</description>
+  </property>
+  <property>
+    <name>hive.exec.submitviachild</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.exec.submit.local.task.via.child</name>
+    <value>true</value>
+    <description>
+      Determines whether local tasks (typically mapjoin hashtable generation phase) runs in 
+      separate JVM (true recommended) or not. 
+      Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.script.maxerrsize</name>
+    <value>100000</value>
+    <description>
+      Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). 
+      This prevents runaway scripts from filling log partitions to capacity
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.script.allow.partial.consumption</name>
+    <value>false</value>
+    <description>
+      When enabled, this option allows a user script to exit successfully without consuming 
+      all the data from the standard input.
+    </description>
+  </property>
+  <property>
+    <name>stream.stderr.reporter.prefix</name>
+    <value>reporter:</value>
+    <description>Streaming jobs that log to standard error with this prefix can log counter or status information.</description>
+  </property>
+  <property>
+    <name>stream.stderr.reporter.enabled</name>
+    <value>true</value>
+    <description>Enable consumption of status and counter messages for streaming jobs.</description>
+  </property>
+  <property>
+    <name>hive.exec.compress.output</name>
+    <value>false</value>
+    <description>
+      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed. 
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.compress.intermediate</name>
+    <value>false</value>
+    <description>
+      This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. 
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+  <property>
+    <name>hive.intermediate.compression.codec</name>
+    <value/>
+    <description/>
+  </property>
+  <property>
+    <name>hive.intermediate.compression.type</name>
+    <value/>
+    <description/>
+  </property>
+  <property>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>256000000</value>
+    <description>Size per reducer. The default is 256MB, i.e. if the input size is 1GB, 4 reducers will be used.</description>
+  </property>
+  <property>
+    <name>hive.exec.reducers.max</name>
+    <value>1009</value>
+    <description>
+      Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
+      negative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.pre.hooks</name>
+    <value/>
+    <description>
+      Comma-separated list of pre-execution hooks to be invoked for each statement. 
+      A pre-execution hook is specified as the name of a Java class which implements the 
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.post.hooks</name>
+    <value/>
+    <description>
+      Comma-separated list of post-execution hooks to be invoked for each statement. 
+      A post-execution hook is specified as the name of a Java class which implements the 
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.failure.hooks</name>
+    <value/>
+    <description>
+      Comma-separated list of on-failure hooks to be invoked for each statement. 
+      An on-failure hook is specified as the name of a Java class which implements the 
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.query.redactor.hooks</name>
+    <value/>
+    <description>
+      Comma-separated list of hooks to be invoked for each query which can 
+      transform the query before it's placed in the job.xml file. Must be a Java class which 
+      extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class.
+    </description>
+  </property>
+  <property>
+    <name>hive.client.stats.publishers</name>
+    <value/>
+    <description>
+      Comma-separated list of statistics publishers to be invoked on counters on each job. 
+      A client stats publisher is specified as the name of a Java class which implements the 
+      org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.parallel</name>
+    <value>false</value>
+    <description>Whether to execute jobs in parallel</description>
+  </property>
+  <property>
+    <name>hive.exec.parallel.thread.number</name>
+    <value>8</value>
+    <description>How many jobs at most can be executed in parallel</description>
+  </property>
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>true</value>
+    <description>Whether speculative execution for reducers should be turned on. </description>
+  </property>
+  <property>
+    <name>hive.exec.counters.pull.interval</name>
+    <value>1000</value>
+    <description>
+      The interval with which to poll the JobTracker for the counters of the running job. 
+      The smaller it is, the more load there will be on the JobTracker; the higher it is, the less granular the captured counter data will be.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.dynamic.partition</name>
+    <value>true</value>
+    <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
+  </property>
+  <property>
+    <name>hive.exec.dynamic.partition.mode</name>
+    <value>strict</value>
+    <description>
+      In strict mode, the user must specify at least one static partition
+      in case the user accidentally overwrites all partitions.
+      In nonstrict mode all partitions are allowed to be dynamic.
+    </description>
+  </property>
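+  <!-- Illustrative only: under strict mode a dynamic-partition insert such as
+       INSERT OVERWRITE TABLE t PARTITION (country='US', dt) SELECT ..., dt FROM src
+       is accepted because "country" is a static partition, whereas PARTITION (country, dt)
+       with no static partition would be rejected (table names t and src are hypothetical). -->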
+  <property>
+    <name>hive.exec.max.dynamic.partitions</name>
+    <value>1000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in total.</description>
+  </property>
+  <property>
+    <name>hive.exec.max.dynamic.partitions.pernode</name>
+    <value>100</value>
+    <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
+  </property>
+  <property>
+    <name>hive.exec.max.created.files</name>
+    <value>100000</value>
+    <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
+  </property>
+  <property>
+    <name>hive.exec.default.partition.name</name>
+    <value>__HIVE_DEFAULT_PARTITION__</value>
+    <description>
+      The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. 
+      This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). 
+      The user has to be aware that the dynamic partition value should not contain this value to avoid confusion.
+    </description>
+  </property>
+  <property>
+    <name>hive.lockmgr.zookeeper.default.partition.name</name>
+    <value>__HIVE_DEFAULT_ZOOKEEPER_PARTITION__</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.exec.show.job.failure.debug.info</name>
+    <value>true</value>
+    <description>
+      If a job fails, whether to provide a link in the CLI to the task with the
+      most failures, along with debugging hints if applicable.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.job.debug.capture.stacktraces</name>
+    <value>true</value>
+    <description>
+      Whether or not stack traces parsed from the task logs of a sampled failed task 
+      for each failed job should be stored in the SessionState
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.job.debug.timeout</name>
+    <value>30000</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.exec.tasklog.debug.timeout</name>
+    <value>20000</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.output.file.extension</name>
+    <value/>
+    <description>
+      String used as a file extension for output files. 
+      If not set, defaults to the codec extension for text files (e.g. ".gz"), or no extension otherwise.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.mode.local.auto</name>
+    <value>false</value>
+    <description>Let Hive determine whether to run in local mode automatically</description>
+  </property>
+  <property>
+    <name>hive.exec.mode.local.auto.inputbytes.max</name>
+    <value>134217728</value>
+    <description>When hive.exec.mode.local.auto is true, the input bytes should be less than this for local mode.</description>
+  </property>
+  <property>
+    <name>hive.exec.mode.local.auto.input.files.max</name>
+    <value>4</value>
+    <description>When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode.</description>
+  </property>
+  <property>
+    <name>hive.exec.drop.ignorenonexistent</name>
+    <value>true</value>
+    <description>Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function</description>
+  </property>
+  <property>
+    <name>hive.ignore.mapjoin.hint</name>
+    <value>true</value>
+    <description>Ignore the mapjoin hint</description>
+  </property>
+  <property>
+    <name>hive.file.max.footer</name>
+    <value>100</value>
+    <description>Maximum number of footer lines a user can define for a table file</description>
+  </property>
+  <property>
+    <name>hive.resultset.use.unique.column.names</name>
+    <value>true</value>
+    <description>
+      Make column names unique in the result set by qualifying column names with table alias if needed.
+      Table alias will be added to column names for queries of type "select *" or 
+      if query explicitly uses table alias "select r1.x..".
+    </description>
+  </property>
+  <property>
+    <name>fs.har.impl</name>
+    <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
+    <description>The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20</description>
+  </property>
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/user/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://HOSTNAME:9083</value>
+    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+  </property>
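+  <!-- HOSTNAME above is a placeholder in this template, presumably substituted with the actual
+       metastore host when the docker environment is prepared, e.g. thrift://your-metastore-host:9083
+       (the host name shown here is hypothetical). -->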
+  <property>
+    <name>hive.metastore.connect.retries</name>
+    <value>3</value>
+    <description>Number of retries while opening a connection to metastore</description>
+  </property>
+  <property>
+    <name>hive.metastore.failure.retries</name>
+    <value>1</value>
+    <description>Number of retries upon failure of Thrift metastore calls</description>
+  </property>
+  <property>
+    <name>hive.metastore.client.connect.retry.delay</name>
+    <value>1s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Number of seconds for the client to wait between consecutive connection attempts
+    </description>
+  </property>
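+  <!-- The time-valued properties in this file accept a unit suffix as listed in each description,
+       e.g. 600s, 2000ms or 1d; a bare number is interpreted in the default unit stated for that
+       property (these example values are illustrative only). -->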
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>600s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      MetaStore Client socket timeout in seconds
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.client.socket.lifetime</name>
+    <value>0s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      MetaStore Client socket lifetime in seconds. After this time is exceeded, client
+      reconnects on the next MetaStore operation. A value of 0s means the connection
+      has an infinite lifetime.
+    </description>
+  </property>
+  <property>
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value>123456</value>
+    <description>password to use against metastore database</description>
+  </property>
+  <property>
+    <name>hive.metastore.ds.connection.url.hook</name>
+    <value/>
+    <description>Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used</description>
+  </property>
+  <property>
+    <name>javax.jdo.option.Multithreaded</name>
+    <value>true</value>
+    <description>Set this to true if multiple threads access metastore through JDO concurrently.</description>
+  </property>
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <!--<value>jdbc:derby:;databaseName=metastore_db;create=true</value>-->
+<value>jdbc:mysql://HOSTNAME:3306/metastore?createDatabaseIfNotExist=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
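+  <!-- As with hive.metastore.uris, HOSTNAME in the JDBC URL above is a template placeholder,
+       presumably replaced with the MySQL host when the environment is set up, e.g.
+       jdbc:mysql://your-mysql-host:3306/metastore?createDatabaseIfNotExist=true (host name hypothetical). -->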
+  <property>
+    <name>hive.hmshandler.retry.attempts</name>
+    <value>10</value>
+    <description>The number of times to retry an HMSHandler call if there was a connection error.</description>
+  </property>
+  <property>
+    <name>hive.hmshandler.retry.interval</name>
+    <value>2000ms</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
+      The time between HMSHandler retry attempts on failure.
+    </description>
+  </property>
+  <property>
+    <name>hive.hmshandler.force.reload.conf</name>
+    <value>false</value>
+    <description>
+      Whether to force reloading of the HMSHandler configuration (including
+      the connection URL) before the next metastore query that accesses the
+      datastore. Once reloaded, this value is reset to false. Used for
+      testing only.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.server.max.message.size</name>
+    <value>104857600</value>
+    <description>Maximum message size in bytes a HMS will accept.</description>
+  </property>
+  <property>
+    <name>hive.metastore.server.min.threads</name>
+    <value>200</value>
+    <description>Minimum number of worker threads in the Thrift server's pool.</description>
+  </property>
+  <property>
+    <name>hive.metastore.server.max.threads</name>
+    <value>1000</value>
+    <description>Maximum number of worker threads in the Thrift server's pool.</description>
+  </property>
+  <property>
+    <name>hive.metastore.server.tcp.keepalive</name>
+    <value>true</value>
+    <description>Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections.</description>
+  </property>
+  <property>
+    <name>hive.metastore.archive.intermediate.original</name>
+    <value>_INTERMEDIATE_ORIGINAL</value>
+    <description>
+      Intermediate dir suffixes used for archiving. Not important what they
+      are, as long as collisions are avoided
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.archive.intermediate.archived</name>
+    <value>_INTERMEDIATE_ARCHIVED</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.metastore.archive.intermediate.extracted</name>
+    <value>_INTERMEDIATE_EXTRACTED</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value/>
+    <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
+  </property>
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive-metastore/_HOST@EXAMPLE.COM</value>
+    <description>
+      The service principal for the metastore Thrift server. 
+      The special string _HOST will be replaced automatically with the correct host name.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
+  </property>
+  <property>
+    <name>hive.metastore.thrift.framed.transport.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used.</description>
+  </property>
+  <property>
+    <name>hive.metastore.thrift.compact.protocol.enabled</name>
+    <value>false</value>
+    <description>
+      If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.
+      Setting it to true will break compatibility with older clients running TBinaryProtocol.
+    </description>
+  </property>
+  <property>
+    <name>hive.cluster.delegation.token.store.class</name>
+    <value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
+    <description>The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
+  </property>
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
+    <value/>
+    <description>
+      The ZooKeeper token store connect string. You can re-use the configuration value
+      set in hive.zookeeper.quorum, by leaving this parameter unset.
+    </description>
+  </property>
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
+    <value>/hivedelegation</value>
+    <description>
+      The root path for token store data. Note that this is used by both HiveServer2 and
+      MetaStore to store delegation Token. One directory gets created for each of them.
+      The final directory names would have the servername appended to it (HIVESERVER2,
+      METASTORE).
+    </description>
+  </property>
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.acl</name>
+    <value/>
+    <description>
+      ACL for token store entries. Comma separated list of ACL entries. For example:
+      sasl:hive/host1@MY.DOMAIN:cdrwa,sasl:hive/host2@MY.DOMAIN:cdrwa
+      Defaults to all permissions for the hiveserver2/metastore process user.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+  <property>
+    <name>datanucleus.connectionPoolingType</name>
+    <value>BONECP</value>
+    <description>Specify connection pool library for datanucleus</description>
+  </property>
+  <property>
+    <name>datanucleus.validateTables</name>
+    <value>false</value>
+    <description>validates existing schema against code. turn this on if you want to verify existing schema</description>
+  </property>
+  <property>
+    <name>datanucleus.validateColumns</name>
+    <value>false</value>
+    <description>validates existing schema against code. turn this on if you want to verify existing schema</description>
+  </property>
+  <property>
+    <name>datanucleus.validateConstraints</name>
+    <value>false</value>
+    <description>validates existing schema against code. turn this on if you want to verify existing schema</description>
+  </property>
+  <property>
+    <name>datanucleus.storeManagerType</name>
+    <value>rdbms</value>
+    <description>metadata store type</description>
+  </property>
+  <property>
+    <name>datanucleus.autoCreateSchema</name>
+    <value>false</value>
+    <description>Creates the necessary schema on startup if one doesn't exist. Set this to false after creating it once.</description>
+  </property>
+  <property>
+    <name>datanucleus.fixedDatastore</name>
+    <value>true</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.metastore.schema.verification</name>
+    <value>false</value>
+    <description>
+      Enforce metastore schema version consistency.
+      True: Verify that version information stored in metastore matches with one from Hive jars.  Also disable automatic
+            schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures
+            proper metastore schema migration. (Default)
+      False: Warn if the version information stored in metastore doesn't match with one from in Hive jars.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.schema.verification.record.version</name>
+    <value>true</value>
+    <description>
+      When true the current MS version is recorded in the VERSION table. If this is disabled and verification is
+       enabled the MS will be unusable.
+    </description>
+  </property>
+  <property>
+    <name>datanucleus.autoStartMechanismMode</name>
+    <value>checked</value>
+    <description>throw exception if metadata tables are incorrect</description>
+  </property>
+  <property>
+    <name>datanucleus.transactionIsolation</name>
+    <value>read-committed</value>
+    <description>Default transaction isolation level for identity generation.</description>
+  </property>
+  <property>
+    <name>datanucleus.cache.level2</name>
+    <value>false</value>
+    <description>Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server</description>
+  </property>
+  <property>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description/>
+  </property>
+  <property>
+    <name>datanucleus.identifierFactory</name>
+    <value>datanucleus1</value>
+    <description>
+      Name of the identifier factory to use when generating table/column names etc. 
+      'datanucleus1' is used for backward compatibility with DataNucleus v1
+    </description>
+  </property>
+  <property>
+    <name>datanucleus.rdbms.useLegacyNativeValueStrategy</name>
+    <value>true</value>
+    <description/>
+  </property>
+  <property>
+    <name>datanucleus.plugin.pluginRegistryBundleCheck</name>
+    <value>LOG</value>
+    <description>Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]</description>
+  </property>
+  <property>
+    <name>hive.metastore.batch.retrieve.max</name>
+    <value>300</value>
+    <description>
+      Maximum number of objects (tables/partitions) that can be retrieved from the metastore in one batch. 
+      The higher the number, the fewer round trips are needed to the Hive metastore server, 
+      but it may also cause a higher memory requirement at the client side.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.batch.retrieve.table.partition.max</name>
+    <value>1000</value>
+    <description>Maximum number of table partitions that metastore internally retrieves in one batch.</description>
+  </property>
+  <property>
+    <name>hive.metastore.init.hooks</name>
+    <value/>
+    <description>
+      A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. 
+      An init hook is specified as the name of a Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value/>
+    <description>List of comma separated listeners for metastore events.</description>
+  </property>
+  <property>
+    <name>hive.metastore.event.listeners</name>
+    <value/>
+    <description/>
+  </property>
+  <property>
+    <name>hive.metastore.event.db.listener.timetolive</name>
+    <value>86400s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      time after which events will be removed from the database listener queue
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.authorization.storage.checks</name>
+    <value>false</value>
+    <description>
+      Should the metastore do authorization checks against the underlying storage (usually hdfs) 
+      for operations like drop-partition (disallow the drop-partition if the user in
+      question doesn't have permissions to delete the corresponding directory
+      on the storage).
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.event.clean.freq</name>
+    <value>0s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Frequency at which timer task runs to purge expired events in metastore.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.event.expiry.duration</name>
+    <value>0s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Duration after which events expire from events table
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>
+      In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using 
+      the client's reported user and group permissions. Note that this property must be set on 
+      both the client and server sides. Further note that it is best effort: 
+      if the client sets it to true and the server sets it to false, the client setting will be ignored.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.partition.name.whitelist.pattern</name>
+    <value/>
+    <description>Partition names will be checked against this regex pattern and rejected if not matched.</description>
+  </property>
+  <property>
+    <name>hive.metastore.integral.jdo.pushdown</name>
+    <value>false</value>
+    <description>
+      Allow JDO query pushdown for integral partition columns in metastore. Off by default. This
+      improves metastore perf for integral columns, especially if there's a large number of partitions.
+      However, it doesn't work correctly with integral values that are not normalized (e.g. have
+      leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization
+      is also irrelevant.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.try.direct.sql</name>
+    <value>true</value>
+    <description>
+      Whether the Hive metastore should try to use direct SQL queries instead of the
+      DataNucleus for certain read paths. This can improve metastore performance when
+      fetching many partitions or column statistics by orders of magnitude; however, it
+      is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,
+      the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't
+      work for all queries on your datastore. If all SQL queries fail (for example, your
+      metastore is backed by MongoDB), you might want to disable this to save the
+      try-and-fall-back cost.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.direct.sql.batch.size</name>
+    <value>0</value>
+    <description>
+      Batch size for partition and other object retrieval from the underlying DB in direct
+      SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations
+      that necessitate this. For DBs that can handle the queries, this isn't necessary and
+      may impede performance. -1 means no batching, 0 means automatic batching.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.try.direct.sql.ddl</name>
+    <value>true</value>
+    <description>
+      Same as hive.metastore.try.direct.sql, for read statements within a transaction that
+      modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL
+      select query has incorrect syntax or something similar inside a transaction, the
+      entire transaction will fail and fall-back to DataNucleus will not be possible. You
+      should disable the usage of direct SQL inside transactions if that happens in your case.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.orm.retrieveMapNullsAsEmptyStrings</name>
+    <value>false</value>
+    <description>Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, pruning is the correct behaviour</description>
+  </property>
+  <property>
+    <name>hive.metastore.disallow.incompatible.col.type.changes</name>
+    <value>false</value>
+    <description>
+      If true (default is false), ALTER TABLE operations which change the type of a
+      column (say STRING) to an incompatible type (say MAP) are disallowed.
+      RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the
+      datatypes can be converted from string to any type. The map is also serialized as
+      a string, which can be read as a string as well. However, with any binary
+      serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions
+      when subsequently trying to access old partitions.
+      
+      Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are
+      not blocked.
+      
+      See HIVE-4409 for more details.
+    </description>
+  </property>
+  <property>
+    <name>hive.table.parameters.default</name>
+    <value/>
+    <description>Default property values for newly created tables</description>
+  </property>
+  <property>
+    <name>hive.ddl.createtablelike.properties.whitelist</name>
+    <value/>
+    <description>Table Properties to copy over when executing a Create Table Like.</description>
+  </property>
+  <property>
+    <name>hive.metastore.rawstore.impl</name>
+    <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
+    <description>
+      Name of the class that implements the org.apache.hadoop.hive.metastore.rawstore interface. 
+      This class is used to store and retrieve raw metadata objects such as tables and databases
+    </description>
+  </property>
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <!--<value>org.apache.derby.jdbc.EmbeddedDriver</value>-->
+<value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+  <property>
+    <name>javax.jdo.PersistenceManagerFactoryClass</name>
+    <value>org.datanucleus.api.jdo.JDOPersistenceManagerFactory</value>
+    <description>class implementing the jdo persistence</description>
+  </property>
+  <property>
+    <name>hive.metastore.expression.proxy</name>
+    <value>org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore</value>
+    <description/>
+  </property>
+  <property>
+    <name>javax.jdo.option.DetachAllOnCommit</name>
+    <value>true</value>
+    <description>Detaches all objects from session so that they can be used after transaction is committed</description>
+  </property>
+  <property>
+    <name>javax.jdo.option.NonTransactionalRead</name>
+    <value>true</value>
+    <description>Reads outside of transactions</description>
+  </property>
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>Username to use against metastore database</description>
+  </property>
+  <property>
+    <name>hive.metastore.end.function.listeners</name>
+    <value/>
+    <description>List of comma separated listeners for the end of metastore functions.</description>
+  </property>
+  <property>
+    <name>hive.metastore.partition.inherit.table.properties</name>
+    <value/>
+    <description>
+      List of comma separated keys occurring in table properties which will get inherited to newly created partitions. 
+      * implies all the keys will get inherited.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.filter.hook</name>
+    <value>org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl</value>
+    <description>Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager is set to an instance of HiveAuthorizerFactory, then this value is ignored.</description>
+  </property>
+  <property>
+    <name>hive.metastore.dml.events</name>
+    <value>false</value>
+    <description>If true, the metastore will be asked to fire events for DML operations</description>
+  </property>
+  <property>
+    <name>hive.metastore.client.drop.partitions.using.expressions</name>
+    <value>true</value>
+    <description>Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, or drops partitions iteratively</description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.enabled</name>
+    <value>true</value>
+    <description>Whether aggregate stats caching is enabled or not.</description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.size</name>
+    <value>10000</value>
+    <description>Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache.</description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.max.partitions</name>
+    <value>10000</value>
+    <description>Maximum number of partitions that are aggregated per cache node.</description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.fpp</name>
+    <value>0.01</value>
+    <description>Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%).</description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.max.variance</name>
+    <value>0.01</value>
+    <description>Maximum tolerable variance in number of partitions between a cached node and our request (default 1%).</description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.ttl</name>
+    <value>600s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Number of seconds for a cached node to be active in the cache before it becomes stale.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.max.writer.wait</name>
+    <value>5000ms</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
+      Number of milliseconds a writer will wait to acquire the writelock before giving up.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.max.reader.wait</name>
+    <value>1000ms</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
+      Number of milliseconds a reader will wait to acquire the readlock before giving up.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.max.full</name>
+    <value>0.9</value>
+    <description>Maximum cache full % after which the cache cleaner thread kicks in.</description>
+  </property>
+  <property>
+    <name>hive.metastore.aggregate.stats.cache.clean.until</name>
+    <value>0.8</value>
+    <description>The cleaner thread cleans until cache reaches this % full size.</description>
+  </property>
+  <property>
+    <name>hive.metadata.export.location</name>
+    <value/>
+    <description>
+      When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, 
+      it is the location to which the metadata will be exported. The default is an empty string, which results in the 
+      metadata being exported to the current user's home directory on HDFS.
+    </description>
+  </property>
+  <property>
+    <name>hive.metadata.move.exported.metadata.to.trash</name>
+    <value>true</value>
+    <description>
+      When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, 
+      this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory 
+      alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data.
+    </description>
+  </property>
+  <property>
+    <name>hive.cli.errors.ignore</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.cli.print.current.db</name>
+    <value>false</value>
+    <description>Whether to include the current database in the Hive prompt.</description>
+  </property>
+  <property>
+    <name>hive.cli.prompt</name>
+    <value>hive</value>
+    <description>
+      Command line prompt configuration value. Other hiveconf can be used in this configuration value. 
+      Variable substitution will only be invoked at the Hive CLI startup.
+    </description>
+  </property>
+  <property>
+    <name>hive.cli.pretty.output.num.cols</name>
+    <value>-1</value>
+    <description>
+      The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.
+      If the value of this property is -1, then Hive will use the auto-detected terminal width.
+    </description>
+  </property>
+  <property>
+    <name>hive.metastore.fs.handler.class</name>
+    <value>org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.session.id</name>
+    <value/>
+    <description/>
+  </property>
+  <property>
+    <name>hive.session.silent</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.session.history.enabled</name>
+    <value>false</value>
+    <description>Whether to log Hive query, query plan, runtime statistics etc.</description>
+  </property>
+  <property>
+    <name>hive.query.string</name>
+    <value/>
+    <description>Query being executed (there might be multiple per session)</description>
+  </property>
+  <property>
+    <name>hive.query.id</name>
+    <value/>
+    <description>ID for the query being executed (there might be multiple per session)</description>
+  </property>
+  <property>
+    <name>hive.jobname.length</name>
+    <value>50</value>
+    <description>max jobname length</description>
+  </property>
+  <property>
+    <name>hive.jar.path</name>
+    <value/>
+    <description>The location of hive_cli.jar that is used when submitting jobs in a separate jvm.</description>
+  </property>
+  <property>
+    <name>hive.aux.jars.path</name>
+    <value/>
+    <description>The location of the plugin jars that contain implementations of user defined functions and serdes.</description>
+  </property>
+  <property>
+    <name>hive.reloadable.aux.jars.path</name>
+    <value/>
+    <description>The jars can be renewed by executing the reload command, and they can then be used as auxiliary classes, e.g. for creating a UDF or SerDe.</description>
+  </property>
+  <property>
+    <name>hive.added.files.path</name>
+    <value/>
+    <description>This is an internal parameter.</description>
+  </property>
+  <property>
+    <name>hive.added.jars.path</name>
+    <value/>
+    <description>This is an internal parameter.</description>
+  </property>
+  <property>
+    <name>hive.added.archives.path</name>
+    <value/>
+    <description>This is an internal parameter.</description>
+  </property>
+  <property>
+    <name>hive.auto.progress.timeout</name>
+    <value>0s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      How long to run autoprogressor for the script/UDTF operators.
+      Set to 0 for forever.
+    </description>
+  </property>
+  <property>
+    <name>hive.script.auto.progress</name>
+    <value>false</value>
+    <description>
+      Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker 
+      to avoid the task getting killed because of inactivity.  Hive sends progress information when the script is 
+      outputting to stderr.  This option removes the need to periodically produce stderr messages, 
+      but users should be cautious because this may prevent infinite loops in the scripts from being killed by the TaskTracker.
+    </description>
+  </property>
+  <property>
+    <name>hive.script.operator.id.env.var</name>
+    <value>HIVE_SCRIPT_OPERATOR_ID</value>
+    <description>
+      Name of the environment variable that holds the unique script operator ID in the user's 
+      transform function (the custom mapper/reducer that the user has specified in the query)
+    </description>
+  </property>
+  <property>
+    <name>hive.script.operator.truncate.env</name>
+    <value>false</value>
+    <description>Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)</description>
+  </property>
+  <property>
+    <name>hive.script.operator.env.blacklist</name>
+    <value>hive.txn.valid.txns,hive.script.operator.env.blacklist</value>
+    <description>Comma separated list of keys from the configuration file not to convert to environment variables when invoking the script operator</description>
+  </property>
+  <property>
+    <name>hive.mapred.mode</name>
+    <value>nonstrict</value>
+    <description>
+      The mode in which the Hive operations are being performed. 
+      In strict mode, some risky queries are not allowed to run. They include:
+        Cartesian Product.
+        No partition being picked up for a query.
+        Comparing bigints and strings.
+        Comparing bigints and doubles.
+        Orderby without limit.
+    </description>
+  </property>
+  <property>
+    <name>hive.alias</name>
+    <value/>
+    <description/>
+  </property>
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries</description>
+  </property>
+  <property>
+    <name>hive.groupby.skewindata</name>
+    <value>false</value>
+    <description>Whether there is skew in data to optimize group by queries</description>
+  </property>
+  <property>
+    <name>hive.join.emit.interval</name>
+    <value>1000</value>
+    <description>How many rows in the right-most join operand Hive should buffer before emitting the join result.</description>
+  </property>
+  <property>
+    <name>hive.join.cache.size</name>
+    <value>25000</value>
+    <description>How many rows in the joining tables (except the streaming table) should be cached in memory.</description>
+  </property>
+  <property>
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+  </property>
+  <property>
+    <name>hive.cbo.returnpath.hiveop</name>
+    <value>false</value>
+    <description>Flag to control calcite plan to hive operator conversion</description>
+  </property>
+  <property>
+    <name>hive.cbo.costmodel.extended</name>
+    <value>false</value>
+    <description>Flag to control enabling the extended cost model based on CPU, IO and cardinality. Otherwise, the cost model is based on cardinality.</description>
+  </property>
+  <property>
+    <name>hive.cbo.costmodel.cpu</name>
+    <value>0.000001</value>
+    <description>Default cost of a comparison</description>
+  </property>
+  <property>
+    <name>hive.cbo.costmodel.network</name>
+    <value>150.0</value>
+    <description>Default cost of transferring a byte over the network; expressed as a multiple of CPU cost</description>
+  </property>
+  <property>
+    <name>hive.cbo.costmodel.local.fs.write</name>
+    <value>4.0</value>
+    <description>Default cost of writing a byte to local FS; expressed as multiple of NETWORK cost</description>
+  </property>
+  <property>
+    <name>hive.cbo.costmodel.local.fs.read</name>
+    <value>4.0</value>
+    <description>Default cost of reading a byte from local FS; expressed as multiple of NETWORK cost</description>
+  </property>
+  <property>
+    <name>hive.cbo.costmodel.hdfs.write</name>
+    <value>10.0</value>
+    <description>Default cost of writing a byte to HDFS; expressed as multiple of Local FS write cost</description>
+  </property>
+  <property>
+    <name>hive.cbo.costmodel.hdfs.read</name>
+    <value>1.5</value>
+    <description>Default cost of reading a byte from HDFS; expressed as multiple of Local FS read cost</description>
+  </property>
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>100</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.mapjoin.optimized.hashtable</name>
+    <value>true</value>
+    <description>
+      Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,
+      because memory-optimized hashtable cannot be serialized.
+    </description>
+  </property>
+  <property>
+    <name>hive.mapjoin.hybridgrace.hashtable</name>
+    <value>true</value>
+    <description>Whether to use hybridgrace hash join as the join method for mapjoin. Tez only.</description>
+  </property>
+  <property>
+    <name>hive.mapjoin.hybridgrace.memcheckfrequency</name>
+    <value>1024</value>
+    <description>For hybrid grace hash join, how often (how many rows apart) we check if memory is full. This number should be a power of 2.</description>
+  </property>
+  <property>
+    <name>hive.mapjoin.hybridgrace.minwbsize</name>
+    <value>524288</value>
+    <description>For hybrid grace hash join, the minimum write buffer size used by optimized hashtable. Default is 512 KB.</description>
+  </property>
+  <property>
+    <name>hive.mapjoin.hybridgrace.minnumpartitions</name>
+    <value>16</value>
+    <description>For hybrid grace hash join, the minimum number of partitions to create.</description>
+  </property>
+  <property>
+    <name>hive.mapjoin.optimized.hashtable.wbsize</name>
+    <value>10485760</value>
+    <description>
+      Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to
+      store data. This is one buffer size. HT may be slightly faster if this is larger, but for small
+      joins unnecessary memory will be allocated and then trimmed.
+    </description>
+  </property>
+  <property>
+    <name>hive.smbjoin.cache.rows</name>
+    <value>10000</value>
+    <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
+  </property>
+  <property>
+    <name>hive.groupby.mapaggr.checkinterval</name>
+    <value>100000</value>
+    <description>Number of rows after which the size check of the grouping keys/aggregation classes is performed</description>
+  </property>
+  <property>
+    <name>hive.map.aggr.hash.percentmemory</name>
+    <value>0.5</value>
+    <description>Portion of total memory to be used by map-side group aggregation hash table</description>
+  </property>
+  <property>
+    <name>hive.mapjoin.followby.map.aggr.hash.percentmemory</name>
+    <value>0.3</value>
+    <description>Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join</description>
+  </property>
+  <property>
+    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
+    <value>0.9</value>
+    <description>
+      The max memory to be used by map-side group aggregation hash table.
+      If the memory usage is higher than this number, force to flush data
+    </description>
+  </property>
+  <property>
+    <name>hive.map.aggr.hash.min.reduction</name>
+    <value>0.5</value>
+    <description>
+      Hash aggregation will be turned off if the ratio between hash  table size and input rows is bigger than this number. 
+      Set to 1 to make sure hash aggregation is never turned off.
+    </description>
+  </property>
+  <property>
+    <name>hive.multigroupby.singlereducer</name>
+    <value>true</value>
+    <description>
+      Whether to optimize multi group by query to generate single M/R  job plan. If the multi group by query has 
+      common group by keys, it will be optimized to generate single M/R job.
+    </description>
+  </property>
+  <property>
+    <name>hive.map.groupby.sorted</name>
+    <value>false</value>
+    <description>
+      If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform 
+      the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this
+      is that it limits the number of mappers to the number of files.
+    </description>
+  </property>
+  <property>
+    <name>hive.map.groupby.sorted.testmode</name>
+    <value>false</value>
+    <description>
+      If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform 
+      the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan
+      is not converted, but a query property is set to denote the same.
+    </description>
+  </property>
+  <property>
+    <name>hive.groupby.orderby.position.alias</name>
+    <value>false</value>
+    <description>Whether to enable using Column Position Alias in Group By or Order By</description>
+  </property>
+  <property>
+    <name>hive.new.job.grouping.set.cardinality</name>
+    <value>30</value>
+    <description>
+      Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.
+      For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;
+      4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).
+      This can lead to explosion across map-reduce boundary if the cardinality of T is very high,
+      and map-side aggregation does not do a very good job. 
+      
+      This parameter decides if Hive should add an additional map-reduce job. If the grouping set
+      cardinality (4 in the example above) is more than this value, a new MR job is added under the
+      assumption that the original group by will reduce the data size.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.copyfile.maxsize</name>
+    <value>33554432</value>
+    <description>Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster.</description>
+  </property>
+  <property>
+    <name>hive.udtf.auto.progress</name>
+    <value>false</value>
+    <description>
+      Whether Hive should automatically send progress information to TaskTracker 
+      when using UDTF's to prevent the task getting killed because of inactivity.  Users should be cautious 
+      because this may prevent TaskTracker from killing tasks with infinite loops.
+    </description>
+  </property>
+  <property>
+    <name>hive.default.fileformat</name>
+    <value>TextFile</value>
+    <description>
+      Expects one of [textfile, sequencefile, rcfile, orc].
+      Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]
+    </description>
+  </property>
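+  <!-- For example, the per-table override mentioned above looks like:
+       CREATE TABLE t (id INT) STORED AS ORC;
+       (table name t is hypothetical; any format from the list above can be used). -->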
+  <property>
+    <name>hive.default.fileformat.managed</name>
+    <value>none</value>
+    <description>
+      Expects one of [none, textfile, sequencefile, rcfile, orc].
+      Default file format for CREATE TABLE statement applied to managed tables only. External tables will be 
+      created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat 
+      for all tables.
+    </description>
+  </property>
+  <property>
+    <name>hive.query.result.fileformat</name>
+    <value>TextFile</value>
+    <description>
+      Expects one of [textfile, sequencefile, rcfile].
+      Default file format for storing result of the query.
+    </description>
+  </property>
+  <property>
+    <name>hive.fileformat.check</name>
+    <value>true</value>
+    <description>Whether to check file format or not when loading data files</description>
+  </property>
+  <property>
+    <name>hive.default.rcfile.serde</name>
+    <value>org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe</value>
+    <description>The default SerDe Hive will use for the RCFile format</description>
+  </property>
+  <property>
+    <name>hive.default.serde</name>
+    <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
+    <description>The default SerDe Hive will use for storage formats that do not specify a SerDe.</description>
+  </property>
+  <property>
+    <name>hive.serdes.using.metastore.for.schema</name>
+    <value>org.apache.hadoop.hive.ql.io.orc.OrcSerde,org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe,org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe,org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe,org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe,org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe,org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe</value>
+    <description>SerDes retrieving schema from the metastore. This is an internal parameter. Check with the Hive dev team.</description>
+  </property>
+  <property>
+    <name>hive.querylog.location</name>
+    <value>${system:java.io.tmpdir}/${system:user.name}</value>
+    <description>Location of Hive run time structured log file</description>
+  </property>
+  <property>
+    <name>hive.querylog.enable.plan.progress</name>
+    <value>true</value>
+    <description>
+      Whether to log the plan's progress every time a job's progress is checked.
+      These logs are written to the location specified by hive.querylog.location
+    </description>
+  </property>
+  <property>
+    <name>hive.querylog.plan.progress.interval</name>
+    <value>60000ms</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
+      The interval to wait between logging the plan's progress.
+      If there is a whole number percentage change in the progress of the mappers or the reducers,
+      the progress is logged regardless of this value.
+      The actual interval will be the ceiling of (this value divided by the value of
+      hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval
+      I.e. if this value is not evenly divisible by the value of hive.exec.counters.pull.interval, progress will be
+      logged less frequently than specified.
+      This only has an effect if hive.querylog.enable.plan.progress is set to true.
+    </description>
+  </property>
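+  <!-- Worked example of the rounding described above (the pull interval value is chosen purely for
+       illustration): if hive.exec.counters.pull.interval were 7000ms, an interval of 60000ms becomes
+       ceiling(60000 / 7000) * 7000 = 9 * 7000 = 63000ms, i.e. slightly less frequent than requested.
+  -->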
+  <property>
+    <name>hive.script.serde</name>
+    <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
+    <description>The default SerDe for transmitting input data to and reading output data from the user scripts. </description>
+  </property>
+  <property>
+    <name>hive.script.recordreader</name>
+    <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
+    <description>The default record reader for reading data from the user scripts. </description>
+  </property>
+  <property>
+    <name>hive.script.recordwriter</name>
+    <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
+    <description>The default record writer for writing data to the user scripts. </description>
+  </property>
+  <property>
+    <name>hive.transform.escape.input</name>
+    <value>false</value>
+    <description>
+      This adds an option to escape special chars (newlines, carriage returns and
+      tabs) when they are passed to the user script. This is useful if the Hive tables
+      can contain data that contains special characters.
+    </description>
+  </property>
+  <property>
+    <name>hive.binary.record.max.length</name>
+    <value>1000</value>
+    <description>
+      Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. 
+      The last record before the end of stream can have less than hive.binary.record.max.length bytes
+    </description>
+  </property>
+  <property>
+    <name>hive.hwi.listen.host</name>
+    <value>0.0.0.0</value>
+    <description>This is the host address the Hive Web Interface will listen on</description>
+  </property>
+  <property>
+    <name>hive.hwi.listen.port</name>
+    <value>9999</value>
+    <description>This is the port the Hive Web Interface will listen on</description>
+  </property>
+  <property>
+    <name>hive.hwi.war.file</name>
+    <value>${env:HWI_WAR_FILE}</value>
+    <description>This sets the path to the HWI war file, relative to ${HIVE_HOME}. </description>
+  </property>
+  <property>
+    <name>hive.mapred.local.mem</name>
+    <value>0</value>
+    <description>mapper/reducer memory in local mode</description>
+  </property>
+  <property>
+    <name>hive.mapjoin.smalltable.filesize</name>
+    <value>25000000</value>
+    <description>
+      The threshold for the input file size of the small tables; if the file size is smaller
+      than this threshold, Hive will try to convert the common join into a map join.
+    </description>
+  </property>
+  <property>
+    <name>hive.sample.seednumber</name>
+    <value>0</value>
+    <description>A number used for percentage sampling. By changing this number, users can change the subsets of data sampled.</description>
+  </property>
+  <property>
+    <name>hive.test.mode</name>
+    <value>false</value>
+    <description>Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.</description>
+  </property>
+  <property>
+    <name>hive.test.mode.prefix</name>
+    <value>test_</value>
+    <description>In test mode, specifies the prefix prepended to the output table name.</description>
+  </property>
+  <property>
+    <name>hive.test.mode.samplefreq</name>
+    <value>32</value>
+    <description>
+      In test mode, specifies the sampling frequency for tables that are not bucketed.
+      For example, the following query:
+        INSERT OVERWRITE TABLE dest SELECT col1 from src
+      would be converted to
+        INSERT OVERWRITE TABLE test_dest
+        SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))
+    </description>
+  </property>
+  <property>
+    <name>hive.test.mode.nosamplelist</name>
+    <value/>
+    <description>In test mode, specifies a comma-separated list of table names to which sampling is not applied.</description>
+  </property>
+  <property>
+    <name>hive.test.dummystats.aggregator</name>
+    <value/>
+    <description>internal variable for test</description>
+  </property>
+  <property>
+    <name>hive.test.dummystats.publisher</name>
+    <value/>
+    <description>internal variable for test</description>
+  </property>
+  <property>
+    <name>hive.test.currenttimestamp</name>
+    <value/>
+    <description>current timestamp for test</description>
+  </property>
+  <property>
+    <name>hive.merge.mapfiles</name>
+    <value>true</value>
+    <description>Merge small files at the end of a map-only job</description>
+  </property>
+  <property>
+    <name>hive.merge.mapredfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a map-reduce job</description>
+  </property>
+  <property>
+    <name>hive.merge.tezfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a Tez DAG</description>
+  </property>
+  <property>
+    <name>hive.merge.sparkfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a Spark DAG Transformation</description>
+  </property>
+  <property>
+    <name>hive.merge.size.per.task</name>
+    <value>256000000</value>
+    <description>Size of merged files at the end of the job</description>
+  </property>
+  <property>
+    <name>hive.merge.smallfiles.avgsize</name>
+    <value>16000000</value>
+    <description>
+      When the average output file size of a job is less than this number, Hive will start an additional 
+      map-reduce job to merge the output files into bigger files. This is only done for map-only jobs 
+      if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
+    </description>
+  </property>
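+  <!-- Illustrative session settings that enable the small-file merge described above, reusing the
+       defaults from this file (values repeated only to make the relationship explicit):
+         SET hive.merge.mapfiles=true;
+         SET hive.merge.mapredfiles=true;
+         SET hive.merge.smallfiles.avgsize=16000000;
+         SET hive.merge.size.per.task=256000000;
+  -->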
+  <property>
+    <name>hive.merge.rcfile.block.level</name>
+    <value>true</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.merge.orcfile.stripe.level</name>
+    <value>true</value>
+    <description>
+      When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled
+      while writing a table with ORC file format, enabling this config will do stripe-level
+      fast merge for small ORC files. Note that enabling this config will not honor the
+      padding tolerance config (hive.exec.orc.block.padding.tolerance).
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.rcfile.use.explicit.header</name>
+    <value>true</value>
+    <description>
+      If this is set, the header for RCFiles will simply be RCF.  If this is not
+      set, the header will be the one borrowed from sequence files, e.g. SEQ- followed
+      by the input and output RCFile formats.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.rcfile.use.sync.cache</name>
+    <value>true</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.io.rcfile.record.interval</name>
+    <value>2147483647</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.io.rcfile.column.number.conf</name>
+    <value>0</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.io.rcfile.tolerate.corruptions</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.io.rcfile.record.buffer.size</name>
+    <value>4194304</value>
+    <description/>
+  </property>
+  <property>
+    <name>parquet.memory.pool.ratio</name>
+    <value>0.5</value>
+    <description>
+      Maximum fraction of heap that can be used by Parquet file writers in one task.
+      It is for avoiding OutOfMemory errors in tasks. Works with Parquet 1.6.0 and above.
+      This config parameter is defined in Parquet, which is why it does not start with 'hive.'.
+    </description>
+  </property>
+  <property>
+    <name>hive.parquet.timestamp.skip.conversion</name>
+    <value>true</value>
+    <description>The current Hive implementation of Parquet stores timestamps in UTC; this flag allows skipping the conversion when reading Parquet files written by other tools.</description>
+  </property>
+  <property>
+    <name>hive.int.timestamp.conversion.in.seconds</name>
+    <value>false</value>
+    <description>
+      Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.
+      Set this flag to true to interpret the value as seconds to be consistent with float/double.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.memory.pool</name>
+    <value>0.5</value>
+    <description>Maximum fraction of heap that can be used by ORC file writers</description>
+  </property>
+  <property>
+    <name>hive.exec.orc.write.format</name>
+    <value/>
+    <description>
+      Define the version of the file to write. Possible values are 0.11 and 0.12.
+      If this parameter is not defined, ORC will use the run length encoding (RLE)
+      introduced in Hive 0.12. Any value other than 0.11 results in the 0.12 encoding.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.stripe.size</name>
+    <value>67108864</value>
+    <description>Define the default ORC stripe size, in bytes.</description>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.block.size</name>
+    <value>268435456</value>
+    <description>Define the default file system block size for ORC files.</description>
+  </property>
+  <property>
+    <name>hive.exec.orc.dictionary.key.size.threshold</name>
+    <value>0.8</value>
+    <description>
+      If the number of keys in a dictionary is greater than this fraction of the total number of
+      non-null rows, turn off dictionary encoding.  Use 1 to always use dictionary encoding.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.row.index.stride</name>
+    <value>10000</value>
+    <description>
+      Define the default ORC index stride in number of rows. (Stride is the number of rows
+      an index entry represents.)
+    </description>
+  </property>
+  <property>
+    <name>hive.orc.row.index.stride.dictionary.check</name>
+    <value>true</value>
+    <description>
+      If enabled, the dictionary check will happen after the first row index stride (default 10000 rows);
+      otherwise the dictionary check will happen before writing the first stripe. In both cases, the decision
+      to use dictionary or not will be retained thereafter.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.buffer.size</name>
+    <value>262144</value>
+    <description>Define the default ORC buffer size, in bytes.</description>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.block.padding</name>
+    <value>true</value>
+    <description>Define the default block padding, which pads stripes to the HDFS block boundaries.</description>
+  </property>
+  <property>
+    <name>hive.exec.orc.block.padding.tolerance</name>
+    <value>0.05</value>
+    <description>
+      Define the tolerance for block padding as a decimal fraction of stripe size (for
+      example, the default value 0.05 is 5% of the stripe size). For the defaults of 64Mb
+      ORC stripe and 256Mb HDFS blocks, the default block padding tolerance of 5% will
+      reserve a maximum of 3.2Mb for padding within the 256Mb block. In that case, if the
+      available size within the block is more than 3.2Mb, a new smaller stripe will be
+      inserted to fit within that space. This will make sure that no stripe written will
+      cross block boundaries and cause remote reads within a node local task.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.compress</name>
+    <value>ZLIB</value>
+    <description>Define the default compression codec for ORC file</description>
+  </property>
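+  <!-- Illustrative example (hypothetical table): with the default codec above, a plain ORC table is
+       ZLIB-compressed; a session-level override of the default might look like:
+         SET hive.exec.orc.default.compress=SNAPPY;
+         CREATE TABLE events_orc (id BIGINT, payload STRING) STORED AS ORC;
+  -->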
+  <property>
+    <name>hive.exec.orc.encoding.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Expects one of [speed, compression].
+      Define the encoding strategy to use while writing data. Changing this will
+      only affect the light weight encoding for integers. This flag will not
+      change the compression level of higher level compression codec (like ZLIB).
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.compression.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Expects one of [speed, compression].
+      Define the compression strategy to use while writing data. 
+      This changes the compression level of higher level compression codec (like ZLIB).
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.split.strategy</name>
+    <value>HYBRID</value>
+    <description>
+      Expects one of [hybrid, bi, etl].
+      This is not a user level config. BI strategy is used when the requirement is to spend less time in split
+      generation as opposed to query execution (split generation does not read or cache file footers).
+      ETL strategy is used when spending a little more time in split generation is acceptable
+      (split generation reads and caches file footers). HYBRID chooses between the above strategies based on heuristics.
+    </description>
+  </property>
+  <property>
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+  </property>
+  <property>
+    <name>hive.orc.cache.stripe.details.size</name>
+    <value>10000</value>
+    <description>Cache size for keeping meta info about orc splits cached in the client.</description>
+  </property>
+  <property>
+    <name>hive.orc.compute.splits.num.threads</name>
+    <value>10</value>
+    <description>How many threads orc should use to create splits in parallel.</description>
+  </property>
+  <property>
+    <name>hive.exec.orc.skip.corrupt.data</name>
+    <value>false</value>
+    <description>
+      If ORC reader encounters corrupt data, this value will be used to determine
+      whether to skip the corrupt data or throw exception. The default behavior is to throw exception.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.zerocopy</name>
+    <value>false</value>
+    <description>Use zerocopy reads with ORC. (This requires Hadoop 2.3 or later.)</description>
+  </property>
+  <property>
+    <name>hive.lazysimple.extended_boolean_literal</name>
+    <value>false</value>
+    <description>
+      LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',
+      '1', and '0' as extended, legal boolean literals, in addition to 'TRUE' and 'FALSE'.
+      The default is false, which means only 'TRUE' and 'FALSE' are treated as legal
+      boolean literals.
+    </description>
+  </property>
+  <property>
+    <name>hive.optimize.skewjoin</name>
+    <value>false</value>
+    <description>
+      Whether to enable skew join optimization. 
+      The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of
+      processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce
+      job, process those skewed keys. The same key need not be skewed for all the tables, and so,
+      the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a
+      map-join.
+    </description>
+  </property>
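+  <!-- Illustrative sketch of the skew join handling described above (tables and join key are hypothetical):
+         SET hive.optimize.skewjoin=true;
+         SET hive.skewjoin.key=100000;
+         SELECT a.user_id, b.total FROM clicks a JOIN orders b ON a.user_id = b.user_id;
+       Keys seen more than 100000 times in the join operator are spilled to HDFS and handled by a
+       follow-up map join job, as the description explains.
+  -->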
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization that converts a common join into a map join based on the input file size.</description>
+  </property>
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>
+      Whether Hive enables the optimization that converts a common join into a map join based on the input file size.
+      If this parameter is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>10000000</value>
+    <description>
+      If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect.
+      However, if it is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than this size,
+      the join is directly converted to a mapjoin (there is no conditional task). The default is 10MB.
+    </description>
+  </property>
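+  <!-- Illustrative sketch of map join auto-conversion, assuming a small dimension table (names hypothetical):
+         SET hive.auto.convert.join=true;
+         SET hive.auto.convert.join.noconditionaltask=true;
+         SET hive.auto.convert.join.noconditionaltask.size=10000000;
+         SELECT f.order_id, d.region FROM fact_orders f JOIN dim_region d ON f.region_id = d.region_id;
+       If dim_region is under the 10MB threshold, the join is planned directly as a map join.
+  -->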
+  <property>
+    <name>hive.auto.convert.join.use.nonstaged</name>
+    <value>false</value>
+    <description>
+      For conditional joins, if input stream from a small alias can be directly applied to join operator without 
+      filtering or projection, the alias need not be pre-staged in the distributed cache via a mapred local task.
+      Currently, this is not working with vectorization or tez execution engine.
+    </description>
+  </property>
+  <property>
+    <name>hive.skewjoin.key</name>
+    <value>100000</value>
+    <description>
+      Determines whether we have a skew key in a join. If we see more than the specified number of rows with the same key in the join operator,
+      we treat the key as a skew join key.
+    </description>
+  </property>
+  <property>
+    <name>hive.skewjoin.mapjoin.map.tasks</name>
+    <value>10000</value>
+    <description>
+      Determines the number of map tasks used in the follow-up map join job for a skew join.
+      It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control.
+    </description>
+  </property>
+  <property>
+    <name>hive.skewjoin.mapjoin.min.split</name>
+    <value>33554432</value>
+    <description>
+      Determines the maximum number of map tasks used in the follow-up map join job for a skew join by specifying
+      the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform fine grained control.
+    </description>
+  </property>
+  <property>
+    <name>hive.heartbeat.interval</name>
+    <value>1000</value>
+    <description>Send a heartbeat after this interval - used by mapjoin and filter operators</description>
+  </property>
+  <property>
+    <name>hive.limit.row.max.size</name>
+    <value>100000</value>
+    <description>When trying a smaller subset of data for a simple LIMIT, the size we need to guarantee each row to have at least.</description>
+  </property>
+  <property>
+    <name>hive.limit.optimize.limit.file</name>
+    <value>10</value>
+    <description>When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample.</description>
+  </property>
+  <property>
+    <name>hive.limit.optimize.enable</name>
+    <value>false</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for a simple LIMIT first.</description>
+  </property>
+  <property>
+    <name>hive.limit.optimize.fetch.max</name>
+    <value>50000</value>
+    <description>
+      Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. 
+      Insert queries are not restricted by this limit.
+    </description>
+  </property>
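+  <!-- Illustrative settings for the simple-LIMIT sampling optimization above (table name hypothetical):
+         SET hive.limit.optimize.enable=true;
+         SET hive.limit.row.max.size=100000;
+         SET hive.limit.optimize.limit.file=10;
+         SELECT * FROM web_logs LIMIT 20;
+  -->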
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>-1.0</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
+  </property>
+  <property>
+    <name>hive.limit.query.max.table.partition</name>
+    <value>-1</value>
+    <description>
+      This controls how many partitions can be scanned for each partitioned table.
+      The default value "-1" means no limit.
+    </description>
+  </property>
+  <property>
+    <name>hive.hashtable.key.count.adjustment</name>
+    <value>1.0</value>
+    <description>Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate of the number of keys is divided by this value. If the value is 0, statistics are not used and hive.hashtable.initialCapacity is used instead.</description>
+  </property>
+  <property>
+    <name>hive.hashtable.initialCapacity</name>
+    <value>100000</value>
+    <description>Initial capacity of the mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0.</description>
+  </property>
+  <property>
+    <name>hive.hashtable.loadfactor</name>
+    <value>0.75</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.mapjoin.followby.gby.localtask.max.memory.usage</name>
+    <value>0.55</value>
+    <description>
+      The fraction of memory the local task can use to hold keys/values in an in-memory hash table
+      when this map join is followed by a group by. If the local task's memory usage exceeds this number,
+      the local task will abort itself; it means the data of the small table is too large to be held in memory.
+    </description>
+  </property>
+  <property>
+    <name>hive.mapjoin.localtask.max.memory.usage</name>
+    <value>0.9</value>
+    <description>
+      The fraction of memory the local task can use to hold keys/values in an in-memory hash table.
+      If the local task's memory usage exceeds this number, the local task will abort itself;
+      it means the data of the small table is too large to be held in memory.
+    </description>
+  </property>
+  <property>
+    <name>hive.mapjoin.check.memory.rows</name>
+    <value>100000</value>
+    <description>The number of rows to process before checking memory usage.</description>
+  </property>
+  <property>
+    <name>hive.debug.localtask</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
+    <description>The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat.</description>
+  </property>
+  <property>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for tez. Tez groups splits in the AM.</description>
+  </property>
+  <property>
+    <name>hive.tez.container.size</name>
+    <value>-1</value>
+    <description>By default Tez will spawn containers of the size of a mapper. This can be used to overwrite.</description>
+  </property>
+  <property>
+    <name>hive.tez.cpu.vcores</name>
+    <value>-1</value>
+    <description>
+      By default Tez will ask for however many cpus map-reduce is configured to use per container.
+      This can be used to overwrite.
+    </description>
+  </property>
+  <property>
+    <name>hive.tez.java.opts</name>
+    <value/>
+    <description>By default Tez will use the Java options from map tasks. This can be used to overwrite.</description>
+  </property>
+  <property>
+    <name>hive.tez.log.level</name>
+    <value>INFO</value>
+    <description>
+      The log level to use for tasks executing as part of the DAG.
+      Used only if hive.tez.java.opts is used to configure Java options.
+    </description>
+  </property>
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>false</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>false</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+  <property>
+    <name>hive.optimize.bucketingsorting</name>
+    <value>true</value>
+    <description>
+      If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing 
+      bucketing/sorting for queries of the form: 
+      insert overwrite table T2 select * from T1;
+      where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets.
+    </description>
+  </property>
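+  <!-- Illustrative example of enforced bucketing/sorting on insert (table and columns hypothetical):
+         SET hive.enforce.bucketing=true;
+         SET hive.enforce.sorting=true;
+         CREATE TABLE user_events (user_id BIGINT, ts BIGINT)
+           CLUSTERED BY (user_id) SORTED BY (user_id) INTO 32 BUCKETS;
+         INSERT OVERWRITE TABLE user_events SELECT user_id, ts FROM staging_events;
+       Because the source here is not bucketed like the target, a reducer enforces the layout; for the
+       identical-layout case described above, hive.optimize.bucketingsorting avoids that reducer.
+  -->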
+  <property>
+    <name>hive.mapred.partitioner</name>
+    <value>org.apache.hadoop.hive.ql.io.DefaultHivePartitioner</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>false</value>
+    <description>If the user asked for a sort-merge bucketed map-side join and it cannot be performed, should the query fail or not?</description>
+  </property>
+  <property>
+    <name>hive.enforce.bucketmapjoin</name>
+    <value>false</value>
+    <description>
+      If the user asked for bucketed map-side join, and it cannot be performed, 
+      should the query fail or not? For example, if the buckets in the tables being joined are
+      not a multiple of each other, bucketed map-side join cannot be performed, and the
+      query will fail if hive.enforce.bucketmapjoin is set to true.
+    </description>
+  </property>
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>false</value>
+    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass the criteria for a sort-merge join.</description>
+  </property>
+  <property>
+    <name>hive.auto.convert.sortmerge.join.bigtable.selection.policy</name>
+    <value>org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ</value>
+    <description>
+      The policy to choose the big table for automatic conversion to sort-merge join. 
+      By default, the table with the largest partitions is selected as the big table. All policies are:
+      . based on position of the table - the leftmost table is selected
+      org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.
+      . based on total size (all the partitions selected in the query) of the table 
+      org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.
+      . based on average size (all the partitions selected in the query) of the table 
+      org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.
+      New policies can be added in future.
+    </description>
+  </property>
+  <property>
+    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
+    <value>false</value>
+    <description>
+      If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, 
+      this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
+      tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the
+      big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
+      sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
+      and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table
+      with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
+      if the complete small table can fit in memory, and a map-join can be performed.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.script.trust</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.exec.rowoffset</name>
+    <value>false</value>
+    <description>Whether to provide the row offset virtual column</description>
+  </property>
+  <property>
+    <name>hive.hadoop.supports.splittable.combineinputformat</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>false</value>
+    <description>Whether to enable automatic use of indexes</description>
+  </property>
+  <property>
+    <name>hive.optimize.index.autoupdate</name>
+    <value>false</value>
+    <description>Whether to update stale indexes automatically</description>
+  </property>
+  <property>
+    <name>hive.optimize.ppd</name>
+    <value>true</value>
+    <description>Whether to enable predicate pushdown</description>
+  </property>
+  <property>
+    <name>hive.ppd.recognizetransivity</name>
+    <value>true</value>
+    <description>Whether to transitively replicate predicate filters over equijoin conditions.</description>
+  </property>
+  <property>
+    <name>hive.ppd.remove.duplicatefilters</name>
+    <value>true</value>
+    <description>Whether to push predicates down into storage handlers.  Ignored when hive.optimize.ppd is false.</description>
+  </property>
+  <property>
+    <name>hive.optimize.constant.propagation</name>
+    <value>true</value>
+    <description>Whether to enable constant propagation optimizer</description>
+  </property>
+  <property>
+    <name>hive.optimize.remove.identity.project</name>
+    <value>true</value>
+    <description>Removes identity project from operator tree</description>
+  </property>
+  <property>
+    <name>hive.optimize.metadataonly</name>
+    <value>true</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.optimize.null.scan</name>
+    <value>true</value>
+    <description>Don't scan relations which are guaranteed to not generate any rows.</description>
+  </property>
+  <property>
+    <name>hive.optimize.ppd.storage</name>
+    <value>true</value>
+    <description>Whether to push predicates down to storage handlers</description>
+  </property>
+  <property>
+    <name>hive.optimize.groupby</name>
+    <value>true</value>
+    <description>Whether to enable the bucketed group by from bucketed partitions/tables.</description>

<TRUNCATED>
http://git-wip-us.apache.org/repos/asf/incubator-griffin/blob/2de6a549/docker/griffin_env/conf/livy/livy.conf
----------------------------------------------------------------------
diff --git a/docker/griffin_env/conf/livy/livy.conf b/docker/griffin_env/conf/livy/livy.conf
new file mode 100644
index 0000000..581863f
--- /dev/null
+++ b/docker/griffin_env/conf/livy/livy.conf
@@ -0,0 +1,3 @@
+livy.spark.master = yarn
+livy.spark.deployMode = cluster
+livy.repl.enableHiveContext = true