Posted to commits@hive.apache.org by gu...@apache.org on 2014/09/05 02:13:34 UTC

svn commit: r1622587 [2/3] - in /hive/branches/cbo: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java conf/hive-default.xml.template data/conf/hive-site.xml

Modified: hive/branches/cbo/conf/hive-default.xml.template
URL: http://svn.apache.org/viewvc/hive/branches/cbo/conf/hive-default.xml.template?rev=1622587&r1=1622586&r2=1622587&view=diff
==============================================================================
--- hive/branches/cbo/conf/hive-default.xml.template (original)
+++ hive/branches/cbo/conf/hive-default.xml.template Fri Sep  5 00:13:34 2014
@@ -23,17 +23,17 @@
   <!-- WARNING!!! You must make your changes in hive-site.xml instead.         -->
   <!-- Hive Execution Parameters -->
   <property>
-    <key>hive.exec.script.wrapper</key>
+    <name>hive.exec.script.wrapper</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.exec.plan</key>
+    <name>hive.exec.plan</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.plan.serialization.format</key>
+    <name>hive.plan.serialization.format</name>
     <value>kryo</value>
     <description>
       Query plan format serialization between client and task nodes. 
@@ -41,27 +41,27 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.scratchdir</key>
+    <name>hive.exec.scratchdir</name>
     <value>/tmp/hive-${system:user.name}</value>
     <description>Scratch space for Hive jobs</description>
   </property>
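
For reference, the ${system:...} tokens in values like the one above resolve against JVM system
properties when the configuration is read. A hive-site.xml override along these lines keeps the
same per-user layout under a different root (the /data/hive-staging root is a hypothetical path,
not a recommended default):

  <property>
    <name>hive.exec.scratchdir</name>
    <value>/data/hive-staging/${system:user.name}</value>
  </property>
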
   <property>
-    <key>hive.exec.local.scratchdir</key>
+    <name>hive.exec.local.scratchdir</name>
     <value>${system:java.io.tmpdir}/${system:user.name}</value>
     <description>Local scratch space for Hive jobs</description>
   </property>
   <property>
-    <key>hive.scratch.dir.permission</key>
+    <name>hive.scratch.dir.permission</name>
     <value>700</value>
     <description/>
   </property>
   <property>
-    <key>hive.exec.submitviachild</key>
+    <name>hive.exec.submitviachild</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.exec.submit.local.task.via.child</key>
+    <name>hive.exec.submit.local.task.via.child</name>
     <value>true</value>
     <description>
      Determines whether local tasks (typically mapjoin hashtable generation phase) run in 
@@ -70,7 +70,7 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.script.maxerrsize</key>
+    <name>hive.exec.script.maxerrsize</name>
     <value>100000</value>
     <description>
       Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). 
@@ -78,7 +78,7 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.script.allow.partial.consumption</key>
+    <name>hive.exec.script.allow.partial.consumption</name>
     <value>false</value>
     <description>
       When enabled, this option allows a user script to exit successfully without consuming 
@@ -86,17 +86,17 @@
     </description>
   </property>
   <property>
-    <key>stream.stderr.reporter.prefix</key>
+    <name>stream.stderr.reporter.prefix</name>
     <value>reporter:</value>
     <description>Streaming jobs that log to standard error with this prefix can log counter or status information.</description>
   </property>
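
With the default prefix, a TRANSFORM/streaming script can emit lines such as the following to
standard error, following the usual Hadoop streaming convention (the group and counter names
here are made up for illustration):

  reporter:counter:MyApp,MalformedRecords,1
  reporter:status:processed 10000 rows
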
   <property>
-    <key>stream.stderr.reporter.enabled</key>
+    <name>stream.stderr.reporter.enabled</name>
     <value>true</value>
     <description>Enable consumption of status and counter messages for streaming jobs.</description>
   </property>
   <property>
-    <key>hive.exec.compress.output</key>
+    <name>hive.exec.compress.output</name>
     <value>false</value>
     <description>
      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed. 
@@ -104,7 +104,7 @@
     </description>
   </property>
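
When this flag is on, the codec itself comes from the underlying Hadoop job settings rather than
from Hive. A sketch of enabling compressed final output together with one possible codec choice
(GzipCodec is illustrative, not a recommendation):

  <property>
    <name>hive.exec.compress.output</name>
    <value>true</value>
  </property>
  <property>
    <name>mapred.output.compression.codec</name>
    <value>org.apache.hadoop.io.compress.GzipCodec</value>
  </property>
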
   <property>
-    <key>hive.exec.compress.intermediate</key>
+    <name>hive.exec.compress.intermediate</name>
     <value>false</value>
     <description>
       This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. 
@@ -112,22 +112,22 @@
     </description>
   </property>
   <property>
-    <key>hive.intermediate.compression.codec</key>
+    <name>hive.intermediate.compression.codec</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.intermediate.compression.type</key>
+    <name>hive.intermediate.compression.type</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.exec.reducers.bytes.per.reducer</key>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
     <value>1000000000</value>
    <description>Size per reducer. The default is 1G, i.e. if the input size is 10G, it will use 10 reducers.</description>
   </property>
   <property>
-    <key>hive.exec.reducers.max</key>
+    <name>hive.exec.reducers.max</name>
     <value>999</value>
     <description>
      Maximum number of reducers that will be used. If the one specified in the configuration parameter mapred.reduce.tasks is
@@ -135,7 +135,7 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.pre.hooks</key>
+    <name>hive.exec.pre.hooks</name>
     <value/>
     <description>
       Comma-separated list of pre-execution hooks to be invoked for each statement. 
@@ -144,7 +144,7 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.post.hooks</key>
+    <name>hive.exec.post.hooks</name>
     <value/>
     <description>
       Comma-separated list of post-execution hooks to be invoked for each statement. 
@@ -153,7 +153,7 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.failure.hooks</key>
+    <name>hive.exec.failure.hooks</name>
     <value/>
     <description>
       Comma-separated list of on-failure hooks to be invoked for each statement. 
@@ -162,7 +162,7 @@
     </description>
   </property>
   <property>
-    <key>hive.client.stats.publishers</key>
+    <name>hive.client.stats.publishers</name>
     <value/>
     <description>
       Comma-separated list of statistics publishers to be invoked on counters on each job. 
@@ -171,22 +171,22 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.parallel</key>
+    <name>hive.exec.parallel</name>
     <value>false</value>
     <description>Whether to execute jobs in parallel</description>
   </property>
   <property>
-    <key>hive.exec.parallel.thread.number</key>
+    <name>hive.exec.parallel.thread.number</name>
     <value>8</value>
     <description>How many jobs at most can be executed in parallel</description>
   </property>
   <property>
-    <key>hive.mapred.reduce.tasks.speculative.execution</key>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
     <value>true</value>
     <description>Whether speculative execution for reducers should be turned on. </description>
   </property>
   <property>
-    <key>hive.exec.counters.pull.interval</key>
+    <name>hive.exec.counters.pull.interval</name>
     <value>1000</value>
     <description>
      The interval at which to poll the JobTracker for the counters of the running job. 
@@ -194,12 +194,12 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.dynamic.partition</key>
+    <name>hive.exec.dynamic.partition</name>
     <value>true</value>
     <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
   </property>
   <property>
-    <key>hive.exec.dynamic.partition.mode</key>
+    <name>hive.exec.dynamic.partition.mode</name>
     <value>strict</value>
     <description>
       In strict mode, the user must specify at least one static partition 
@@ -207,27 +207,27 @@
     </description>
   </property>
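
In practice, fully dynamic INSERT ... PARTITION statements require relaxing this to nonstrict,
e.g. via hive-site.xml (a sketch; whether to relax it is a per-deployment policy decision):

  <property>
    <name>hive.exec.dynamic.partition.mode</name>
    <value>nonstrict</value>
  </property>
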
   <property>
-    <key>hive.exec.max.dynamic.partitions</key>
+    <name>hive.exec.max.dynamic.partitions</name>
     <value>1000</value>
     <description>Maximum number of dynamic partitions allowed to be created in total.</description>
   </property>
   <property>
-    <key>hive.exec.max.dynamic.partitions.pernode</key>
+    <name>hive.exec.max.dynamic.partitions.pernode</name>
     <value>100</value>
     <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
   </property>
   <property>
-    <key>hive.exec.max.created.files</key>
+    <name>hive.exec.max.created.files</name>
     <value>100000</value>
     <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
   </property>
   <property>
-    <key>hive.downloaded.resources.dir</key>
+    <name>hive.downloaded.resources.dir</name>
     <value>${system:java.io.tmpdir}/${hive.session.id}_resources</value>
     <description>Temporary local directory for added resources in the remote file system.</description>
   </property>
   <property>
-    <key>hive.exec.default.partition.name</key>
+    <name>hive.exec.default.partition.name</name>
     <value>__HIVE_DEFAULT_PARTITION__</value>
     <description>
      The default partition name in case the dynamic partition column value is null/empty string or any other value that cannot be escaped. 
@@ -236,12 +236,12 @@
     </description>
   </property>
   <property>
-    <key>hive.lockmgr.zookeeper.default.partition.name</key>
+    <name>hive.lockmgr.zookeeper.default.partition.name</name>
     <value>__HIVE_DEFAULT_ZOOKEEPER_PARTITION__</value>
     <description/>
   </property>
   <property>
-    <key>hive.exec.show.job.failure.debug.info</key>
+    <name>hive.exec.show.job.failure.debug.info</name>
     <value>true</value>
     <description>
       If a job fails, whether to provide a link in the CLI to the task with the
@@ -249,7 +249,7 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.job.debug.capture.stacktraces</key>
+    <name>hive.exec.job.debug.capture.stacktraces</name>
     <value>true</value>
     <description>
       Whether or not stack traces parsed from the task logs of a sampled failed task 
@@ -257,17 +257,17 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.job.debug.timeout</key>
+    <name>hive.exec.job.debug.timeout</name>
     <value>30000</value>
     <description/>
   </property>
   <property>
-    <key>hive.exec.tasklog.debug.timeout</key>
+    <name>hive.exec.tasklog.debug.timeout</name>
     <value>20000</value>
     <description/>
   </property>
   <property>
-    <key>hive.output.file.extension</key>
+    <name>hive.output.file.extension</name>
     <value/>
     <description>
       String used as a file extension for output files. 
@@ -275,37 +275,37 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.mode.local.auto</key>
+    <name>hive.exec.mode.local.auto</name>
     <value>false</value>
     <description>Let Hive determine whether to run in local mode automatically</description>
   </property>
   <property>
-    <key>hive.exec.mode.local.auto.inputbytes.max</key>
+    <name>hive.exec.mode.local.auto.inputbytes.max</name>
     <value>134217728</value>
    <description>When hive.exec.mode.local.auto is true, input bytes should be less than this for local mode.</description>
   </property>
   <property>
-    <key>hive.exec.mode.local.auto.input.files.max</key>
+    <name>hive.exec.mode.local.auto.input.files.max</name>
     <value>4</value>
    <description>When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode.</description>
   </property>
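
Taken together, the hive.exec.mode.local.auto* settings gate automatic local execution: it is only
attempted when the feature is on and the job stays under the input-size and task-count ceilings
above. A minimal sketch that turns it on while keeping the template's default thresholds:

  <property>
    <name>hive.exec.mode.local.auto</name>
    <value>true</value>
  </property>
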
   <property>
-    <key>hive.exec.drop.ignorenonexistent</key>
+    <name>hive.exec.drop.ignorenonexistent</name>
     <value>true</value>
     <description>Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view</description>
   </property>
   <property>
-    <key>hive.ignore.mapjoin.hint</key>
+    <name>hive.ignore.mapjoin.hint</name>
     <value>true</value>
     <description>Ignore the mapjoin hint</description>
   </property>
   <property>
-    <key>hive.file.max.footer</key>
+    <name>hive.file.max.footer</name>
     <value>100</value>
    <description>maximum number of footer lines a user can define for a table file</description>
   </property>
   <property>
-    <key>hive.resultset.use.unique.column.names</key>
+    <name>hive.resultset.use.unique.column.names</name>
     <value>true</value>
     <description>
       Make column names unique in the result set by qualifying column names with table alias if needed.
@@ -314,77 +314,86 @@
     </description>
   </property>
   <property>
-    <key>fs.har.impl</key>
+    <name>fs.har.impl</name>
     <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
     <description>The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20</description>
   </property>
   <property>
-    <key>hive.metastore.metadb.dir</key>
+    <name>hive.metastore.metadb.dir</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.metastore.warehouse.dir</key>
+    <name>hive.metastore.warehouse.dir</name>
     <value>/user/hive/warehouse</value>
     <description>location of default database for the warehouse</description>
   </property>
   <property>
-    <key>hive.metastore.uris</key>
+    <name>hive.metastore.uris</name>
     <value/>
     <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
   </property>
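
When left empty, as above, the client runs with an embedded metastore; pointing it at a standalone
service looks like the following (the hostname is a placeholder; 9083 is the port conventionally
used for the metastore Thrift service):

  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://metastore01.example.com:9083</value>
  </property>
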
   <property>
-    <key>hive.metastore.connect.retries</key>
+    <name>hive.metastore.connect.retries</name>
     <value>3</value>
     <description>Number of retries while opening a connection to metastore</description>
   </property>
   <property>
-    <key>hive.metastore.failure.retries</key>
+    <name>hive.metastore.failure.retries</name>
     <value>1</value>
     <description>Number of retries upon failure of Thrift metastore calls</description>
   </property>
   <property>
-    <key>hive.metastore.client.connect.retry.delay</key>
-    <value>1</value>
-    <description>Number of seconds for the client to wait between consecutive connection attempts</description>
+    <name>hive.metastore.client.connect.retry.delay</name>
+    <value>1s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Number of seconds for the client to wait between consecutive connection attempts
+    </description>
   </property>
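
The time syntax introduced above applies to every property whose description carries the
"Expects a time value with unit" note: the value is a number plus an optional unit suffix, and
the per-property default unit is assumed when the suffix is omitted. For this property, the
following overrides would therefore be equivalent:

    <value>2s</value>
    <value>2000ms</value>
    <value>2</value>  <!-- bare number: read as seconds, the default unit for this property -->
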
   <property>
-    <key>hive.metastore.client.socket.timeout</key>
-    <value>600</value>
-    <description>MetaStore Client socket timeout in seconds</description>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>600s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      MetaStore Client socket timeout in seconds
+    </description>
   </property>
   <property>
-    <key>javax.jdo.option.ConnectionPassword</key>
+    <name>javax.jdo.option.ConnectionPassword</name>
     <value>mine</value>
     <description>password to use against metastore database</description>
   </property>
   <property>
-    <key>hive.metastore.ds.connection.url.hook</key>
+    <name>hive.metastore.ds.connection.url.hook</name>
     <value/>
     <description>Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used</description>
   </property>
   <property>
-    <key>javax.jdo.option.Multithreaded</key>
+    <name>javax.jdo.option.Multithreaded</name>
     <value>true</value>
     <description>Set this to true if multiple threads access metastore through JDO concurrently.</description>
   </property>
   <property>
-    <key>javax.jdo.option.ConnectionURL</key>
+    <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
     <description>JDBC connect string for a JDBC metastore</description>
   </property>
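
The Derby URL above gives a single-user, embedded metastore database. A standalone RDBMS is
configured through the same javax.jdo.option.* properties; a MySQL-flavored sketch (host, database
name and credentials are placeholders, and the connector jar must be on Hive's classpath):

  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://db01.example.com:3306/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
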
   <property>
-    <key>hive.hmshandler.retry.attempts</key>
+    <name>hive.hmshandler.retry.attempts</name>
     <value>1</value>
-    <description>The number of times to retry a HMSHandler call if there were a connection error</description>
+    <description>The number of times to retry a HMSHandler call if there was a connection error.</description>
   </property>
   <property>
-    <key>hive.hmshandler.retry.interval</key>
-    <value>1000</value>
-    <description>The number of milliseconds between HMSHandler retry attempts</description>
+    <name>hive.hmshandler.retry.interval</name>
+    <value>1000ms</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
+      The time between HMSHandler retry attempts on failure.
+    </description>
   </property>
   <property>
-    <key>hive.hmshandler.force.reload.conf</key>
+    <name>hive.hmshandler.force.reload.conf</name>
     <value>false</value>
     <description>
       Whether to force reloading of the HMSHandler configuration (including
@@ -394,22 +403,22 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.server.min.threads</key>
+    <name>hive.metastore.server.min.threads</name>
     <value>200</value>
     <description>Minimum number of worker threads in the Thrift server's pool.</description>
   </property>
   <property>
-    <key>hive.metastore.server.max.threads</key>
+    <name>hive.metastore.server.max.threads</name>
     <value>100000</value>
     <description>Maximum number of worker threads in the Thrift server's pool.</description>
   </property>
   <property>
-    <key>hive.metastore.server.tcp.keepalive</key>
+    <name>hive.metastore.server.tcp.keepalive</name>
     <value>true</value>
     <description>Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections.</description>
   </property>
   <property>
-    <key>hive.metastore.archive.intermediate.original</key>
+    <name>hive.metastore.archive.intermediate.original</name>
     <value>_INTERMEDIATE_ORIGINAL</value>
     <description>
       Intermediate dir suffixes used for archiving. Not important what they
@@ -417,22 +426,22 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.archive.intermediate.archived</key>
+    <name>hive.metastore.archive.intermediate.archived</name>
     <value>_INTERMEDIATE_ARCHIVED</value>
     <description/>
   </property>
   <property>
-    <key>hive.metastore.archive.intermediate.extracted</key>
+    <name>hive.metastore.archive.intermediate.extracted</name>
     <value>_INTERMEDIATE_EXTRACTED</value>
     <description/>
   </property>
   <property>
-    <key>hive.metastore.kerberos.keytab.file</key>
+    <name>hive.metastore.kerberos.keytab.file</name>
     <value/>
     <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
   </property>
   <property>
-    <key>hive.metastore.kerberos.principal</key>
+    <name>hive.metastore.kerberos.principal</name>
     <value>hive-metastore/_HOST@EXAMPLE.COM</value>
     <description>
       The service principal for the metastore Thrift server. 
@@ -440,77 +449,77 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.sasl.enabled</key>
+    <name>hive.metastore.sasl.enabled</name>
     <value>false</value>
     <description>If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
   </property>
   <property>
-    <key>hive.metastore.thrift.framed.transport.enabled</key>
+    <name>hive.metastore.thrift.framed.transport.enabled</name>
     <value>false</value>
     <description>If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used.</description>
   </property>
   <property>
-    <key>hive.cluster.delegation.token.store.class</key>
+    <name>hive.cluster.delegation.token.store.class</name>
     <value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
     <description>The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
   </property>
   <property>
-    <key>hive.cluster.delegation.token.store.zookeeper.connectString</key>
+    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
     <value/>
     <description>The ZooKeeper token store connect string.</description>
   </property>
   <property>
-    <key>hive.cluster.delegation.token.store.zookeeper.znode</key>
+    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
     <value>/hive/cluster/delegation</value>
     <description>The root path for token store data.</description>
   </property>
   <property>
-    <key>hive.cluster.delegation.token.store.zookeeper.acl</key>
+    <name>hive.cluster.delegation.token.store.zookeeper.acl</name>
     <value/>
     <description>ACL for token store entries. List comma separated all server principals for the cluster.</description>
   </property>
   <property>
-    <key>hive.metastore.cache.pinobjtypes</key>
+    <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
   </property>
   <property>
-    <key>datanucleus.connectionPoolingType</key>
+    <name>datanucleus.connectionPoolingType</name>
     <value>BONECP</value>
     <description>Specify connection pool library for datanucleus</description>
   </property>
   <property>
-    <key>datanucleus.validateTables</key>
+    <name>datanucleus.validateTables</name>
     <value>false</value>
    <description>Validates existing schema against code. Turn this on if you want to verify existing schema.</description>
   </property>
   <property>
-    <key>datanucleus.validateColumns</key>
+    <name>datanucleus.validateColumns</name>
     <value>false</value>
    <description>Validates existing schema against code. Turn this on if you want to verify existing schema.</description>
   </property>
   <property>
-    <key>datanucleus.validateConstraints</key>
+    <name>datanucleus.validateConstraints</name>
     <value>false</value>
    <description>Validates existing schema against code. Turn this on if you want to verify existing schema.</description>
   </property>
   <property>
-    <key>datanucleus.storeManagerType</key>
+    <name>datanucleus.storeManagerType</name>
     <value>rdbms</value>
     <description>metadata store type</description>
   </property>
   <property>
-    <key>datanucleus.autoCreateSchema</key>
+    <name>datanucleus.autoCreateSchema</name>
     <value>true</value>
    <description>Creates necessary schema on startup if one doesn't exist. Set this to false after creating it once.</description>
   </property>
   <property>
-    <key>datanucleus.fixedDatastore</key>
+    <name>datanucleus.fixedDatastore</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.metastore.schema.verification</key>
+    <name>hive.metastore.schema.verification</name>
     <value>false</value>
     <description>
       Enforce metastore schema version consistency.
@@ -521,27 +530,27 @@
     </description>
   </property>
   <property>
-    <key>datanucleus.autoStartMechanismMode</key>
+    <name>datanucleus.autoStartMechanismMode</name>
     <value>checked</value>
     <description>throw exception if metadata tables are incorrect</description>
   </property>
   <property>
-    <key>datanucleus.transactionIsolation</key>
+    <name>datanucleus.transactionIsolation</name>
     <value>read-committed</value>
     <description>Default transaction isolation level for identity generation.</description>
   </property>
   <property>
-    <key>datanucleus.cache.level2</key>
+    <name>datanucleus.cache.level2</name>
     <value>false</value>
     <description>Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server</description>
   </property>
   <property>
-    <key>datanucleus.cache.level2.type</key>
+    <name>datanucleus.cache.level2.type</name>
     <value>none</value>
     <description/>
   </property>
   <property>
-    <key>datanucleus.identifierFactory</key>
+    <name>datanucleus.identifierFactory</name>
     <value>datanucleus1</value>
     <description>
       Name of the identifier factory to use when generating table/column names etc. 
@@ -549,17 +558,17 @@
     </description>
   </property>
   <property>
-    <key>datanucleus.rdbms.useLegacyNativeValueStrategy</key>
+    <name>datanucleus.rdbms.useLegacyNativeValueStrategy</name>
     <value>true</value>
     <description/>
   </property>
   <property>
-    <key>datanucleus.plugin.pluginRegistryBundleCheck</key>
+    <name>datanucleus.plugin.pluginRegistryBundleCheck</name>
     <value>LOG</value>
     <description>Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]</description>
   </property>
   <property>
-    <key>hive.metastore.batch.retrieve.max</key>
+    <name>hive.metastore.batch.retrieve.max</name>
     <value>300</value>
     <description>
      Maximum number of objects (tables/partitions) that can be retrieved from metastore in one batch. 
@@ -568,12 +577,12 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.batch.retrieve.table.partition.max</key>
+    <name>hive.metastore.batch.retrieve.table.partition.max</name>
     <value>1000</value>
     <description>Maximum number of table partitions that metastore internally retrieves in one batch.</description>
   </property>
   <property>
-    <key>hive.metastore.init.hooks</key>
+    <name>hive.metastore.init.hooks</name>
     <value/>
     <description>
       A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. 
@@ -581,17 +590,17 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.pre.event.listeners</key>
+    <name>hive.metastore.pre.event.listeners</name>
     <value/>
     <description>List of comma separated listeners for metastore events.</description>
   </property>
   <property>
-    <key>hive.metastore.event.listeners</key>
+    <name>hive.metastore.event.listeners</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.metastore.authorization.storage.checks</key>
+    <name>hive.metastore.authorization.storage.checks</name>
     <value>false</value>
     <description>
       Should the metastore do authorization checks against the underlying storage (usually hdfs) 
@@ -601,17 +610,23 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.event.clean.freq</key>
-    <value>0</value>
-    <description>Frequency at which timer task runs to purge expired events in metastore(in seconds).</description>
+    <name>hive.metastore.event.clean.freq</name>
+    <value>0s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Frequency at which the timer task runs to purge expired events in metastore.
+    </description>
   </property>
   <property>
-    <key>hive.metastore.event.expiry.duration</key>
-    <value>0</value>
-    <description>Duration after which events expire from events table (in seconds)</description>
+    <name>hive.metastore.event.expiry.duration</name>
+    <value>0s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Duration after which events expire from events table
+    </description>
   </property>
   <property>
-    <key>hive.metastore.execute.setugi</key>
+    <name>hive.metastore.execute.setugi</name>
     <value>true</value>
     <description>
       In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using 
@@ -621,12 +636,12 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.partition.name.whitelist.pattern</key>
+    <name>hive.metastore.partition.name.whitelist.pattern</name>
     <value/>
     <description>Partition names will be checked against this regex pattern and rejected if not matched.</description>
   </property>
   <property>
-    <key>hive.metastore.integral.jdo.pushdown</key>
+    <name>hive.metastore.integral.jdo.pushdown</name>
     <value>false</value>
     <description>
       Allow JDO query pushdown for integral partition columns in metastore. Off by default. This
@@ -637,17 +652,17 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.try.direct.sql</key>
+    <name>hive.metastore.try.direct.sql</name>
     <value>true</value>
     <description/>
   </property>
   <property>
-    <key>hive.metastore.try.direct.sql.ddl</key>
+    <name>hive.metastore.try.direct.sql.ddl</name>
     <value>true</value>
     <description/>
   </property>
   <property>
-    <key>hive.metastore.disallow.incompatible.col.type.changes</key>
+    <name>hive.metastore.disallow.incompatible.col.type.changes</name>
     <value>false</value>
     <description>
       If true (default is false), ALTER TABLE operations which change the type of 
@@ -665,17 +680,17 @@
     </description>
   </property>
   <property>
-    <key>hive.table.parameters.default</key>
+    <name>hive.table.parameters.default</name>
     <value/>
     <description>Default property values for newly created tables</description>
   </property>
   <property>
-    <key>hive.ddl.createtablelike.properties.whitelist</key>
+    <name>hive.ddl.createtablelike.properties.whitelist</name>
     <value/>
     <description>Table Properties to copy over when executing a Create Table Like.</description>
   </property>
   <property>
-    <key>hive.metastore.rawstore.impl</key>
+    <name>hive.metastore.rawstore.impl</name>
     <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
     <description>
       Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. 
@@ -683,42 +698,42 @@
     </description>
   </property>
   <property>
-    <key>javax.jdo.option.ConnectionDriverName</key>
+    <name>javax.jdo.option.ConnectionDriverName</name>
     <value>org.apache.derby.jdbc.EmbeddedDriver</value>
     <description>Driver class name for a JDBC metastore</description>
   </property>
   <property>
-    <key>javax.jdo.PersistenceManagerFactoryClass</key>
+    <name>javax.jdo.PersistenceManagerFactoryClass</name>
     <value>org.datanucleus.api.jdo.JDOPersistenceManagerFactory</value>
     <description>class implementing the jdo persistence</description>
   </property>
   <property>
-    <key>hive.metastore.expression.proxy</key>
+    <name>hive.metastore.expression.proxy</name>
     <value>org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore</value>
     <description/>
   </property>
   <property>
-    <key>javax.jdo.option.DetachAllOnCommit</key>
+    <name>javax.jdo.option.DetachAllOnCommit</name>
     <value>true</value>
     <description>Detaches all objects from session so that they can be used after transaction is committed</description>
   </property>
   <property>
-    <key>javax.jdo.option.NonTransactionalRead</key>
+    <name>javax.jdo.option.NonTransactionalRead</name>
     <value>true</value>
     <description>Reads outside of transactions</description>
   </property>
   <property>
-    <key>javax.jdo.option.ConnectionUserName</key>
+    <name>javax.jdo.option.ConnectionUserName</name>
     <value>APP</value>
     <description>Username to use against metastore database</description>
   </property>
   <property>
-    <key>hive.metastore.end.function.listeners</key>
+    <name>hive.metastore.end.function.listeners</name>
     <value/>
     <description>List of comma separated listeners for the end of metastore functions.</description>
   </property>
   <property>
-    <key>hive.metastore.partition.inherit.table.properties</key>
+    <name>hive.metastore.partition.inherit.table.properties</name>
     <value/>
     <description>
      List of comma separated keys occurring in table properties which will be inherited by newly created partitions. 
@@ -726,7 +741,7 @@
     </description>
   </property>
   <property>
-    <key>hive.metadata.export.location</key>
+    <name>hive.metadata.export.location</name>
     <value/>
     <description>
       When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, 
@@ -735,7 +750,7 @@
     </description>
   </property>
   <property>
-    <key>hive.metadata.move.exported.metadata.to.trash</key>
+    <name>hive.metadata.move.exported.metadata.to.trash</name>
     <value>true</value>
     <description>
       When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, 
@@ -744,17 +759,17 @@
     </description>
   </property>
   <property>
-    <key>hive.cli.errors.ignore</key>
+    <name>hive.cli.errors.ignore</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.cli.print.current.db</key>
+    <name>hive.cli.print.current.db</name>
     <value>false</value>
     <description>Whether to include the current database in the Hive prompt.</description>
   </property>
   <property>
-    <key>hive.cli.prompt</key>
+    <name>hive.cli.prompt</name>
     <value>hive</value>
     <description>
      Command line prompt configuration value. Other hiveconf variables can be used in this configuration value. 
@@ -762,7 +777,7 @@
     </description>
   </property>
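
Since other hiveconf values can be interpolated here, the prompt can surface deployment context;
a sketch (the cluster.tag variable is hypothetical and would have to be supplied elsewhere, e.g.
via --hiveconf cluster.tag=staging on the command line):

  <property>
    <name>hive.cli.prompt</name>
    <value>hive-${hiveconf:cluster.tag}</value>
  </property>
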
   <property>
-    <key>hive.cli.pretty.output.num.cols</key>
+    <name>hive.cli.pretty.output.num.cols</name>
     <value>-1</value>
     <description>
       The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.
@@ -770,85 +785,86 @@
     </description>
   </property>
   <property>
-    <key>hive.metastore.fs.handler.class</key>
+    <name>hive.metastore.fs.handler.class</name>
     <value>org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl</value>
     <description/>
   </property>
   <property>
-    <key>hive.session.id</key>
+    <name>hive.session.id</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.session.silent</key>
+    <name>hive.session.silent</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.session.history.enabled</key>
+    <name>hive.session.history.enabled</name>
     <value>false</value>
     <description>Whether to log Hive query, query plan, runtime statistics etc.</description>
   </property>
   <property>
-    <key>hive.query.string</key>
+    <name>hive.query.string</name>
     <value/>
    <description>Query being executed (might be multiple per session)</description>
   </property>
   <property>
-    <key>hive.query.id</key>
+    <name>hive.query.id</name>
     <value/>
    <description>ID for query being executed (might be multiple per session)</description>
   </property>
   <property>
-    <key>hive.jobname.length</key>
+    <name>hive.jobname.length</name>
     <value>50</value>
     <description>max jobname length</description>
   </property>
   <property>
-    <key>hive.jar.path</key>
+    <name>hive.jar.path</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.aux.jars.path</key>
+    <name>hive.aux.jars.path</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.added.files.path</key>
+    <name>hive.added.files.path</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.added.jars.path</key>
+    <name>hive.added.jars.path</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.added.archives.path</key>
+    <name>hive.added.archives.path</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.auto.progress.timeout</key>
-    <value>0</value>
+    <name>hive.auto.progress.timeout</name>
+    <value>0s</value>
     <description>
-      How long to run autoprogressor for the script/UDTF operators (in seconds).
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      How long to run autoprogressor for the script/UDTF operators.
       Set to 0 for forever.
     </description>
   </property>
   <property>
-    <key>hive.table.name</key>
+    <name>hive.table.name</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.partition.name</key>
+    <name>hive.partition.name</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.script.auto.progress</key>
+    <name>hive.script.auto.progress</name>
     <value>false</value>
     <description>
       Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker 
@@ -858,7 +874,7 @@
     </description>
   </property>
   <property>
-    <key>hive.script.operator.id.env.var</key>
+    <name>hive.script.operator.id.env.var</name>
     <value>HIVE_SCRIPT_OPERATOR_ID</value>
     <description>
       Name of the environment variable that holds the unique script operator ID in the user's 
@@ -866,12 +882,12 @@
     </description>
   </property>
   <property>
-    <key>hive.script.operator.truncate.env</key>
+    <name>hive.script.operator.truncate.env</name>
     <value>false</value>
     <description>Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)</description>
   </property>
   <property>
-    <key>hive.mapred.mode</key>
+    <name>hive.mapred.mode</name>
     <value>nonstrict</value>
     <description>
       The mode in which the Hive operations are being performed. 
@@ -884,22 +900,22 @@
     </description>
   </property>
   <property>
-    <key>hive.alias</key>
+    <name>hive.alias</name>
     <value/>
     <description/>
   </property>
   <property>
-    <key>hive.map.aggr</key>
+    <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries</description>
   </property>
   <property>
-    <key>hive.groupby.skewindata</key>
+    <name>hive.groupby.skewindata</name>
     <value>false</value>
     <description>Whether there is skew in data to optimize group by queries</description>
   </property>
   <property>
-    <key>hive.optimize.multigroupby.common.distincts</key>
+    <name>hive.optimize.multigroupby.common.distincts</name>
     <value>true</value>
     <description>
       Whether to optimize a multi-groupby query with the same distinct.
@@ -918,27 +934,27 @@
     </description>
   </property>
   <property>
-    <key>hive.join.emit.interval</key>
+    <name>hive.join.emit.interval</name>
     <value>1000</value>
     <description>How many rows in the right-most join operand Hive should buffer before emitting the join result.</description>
   </property>
   <property>
-    <key>hive.join.cache.size</key>
+    <name>hive.join.cache.size</name>
     <value>25000</value>
     <description>How many rows in the joining tables (except the streaming table) should be cached in memory.</description>
   </property>
   <property>
-    <key>hive.cbo.enable</key>
-    <value>false</value>
+    <name>hive.cbo.enable</name>
+    <value>true</value>
     <description>Flag to control enabling Cost Based Optimizations using Optiq framework.</description>
   </property>
   <property>
-    <key>hive.mapjoin.bucket.cache.size</key>
+    <name>hive.mapjoin.bucket.cache.size</name>
     <value>100</value>
     <description/>
   </property>
   <property>
-    <key>hive.mapjoin.optimized.hashtable</key>
+    <name>hive.mapjoin.optimized.hashtable</name>
     <value>true</value>
     <description>
       Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,
@@ -946,7 +962,7 @@
     </description>
   </property>
   <property>
-    <key>hive.mapjoin.optimized.keys</key>
+    <name>hive.mapjoin.optimized.keys</name>
     <value>true</value>
     <description>
      Whether MapJoin hashtable should use optimized (size-wise) keys, allowing the table to take less
@@ -954,7 +970,7 @@
     </description>
   </property>
   <property>
-    <key>hive.mapjoin.lazy.hashtable</key>
+    <name>hive.mapjoin.lazy.hashtable</name>
     <value>true</value>
     <description>
       Whether MapJoin hashtable should deserialize values on demand. Depending on how many values in
@@ -963,7 +979,7 @@
     </description>
   </property>
   <property>
-    <key>hive.mapjoin.optimized.hashtable.wbsize</key>
+    <name>hive.mapjoin.optimized.hashtable.wbsize</name>
     <value>10485760</value>
     <description>
       Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to
@@ -972,27 +988,27 @@
     </description>
   </property>
   <property>
-    <key>hive.smbjoin.cache.rows</key>
+    <name>hive.smbjoin.cache.rows</name>
     <value>10000</value>
     <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
   </property>
   <property>
-    <key>hive.groupby.mapaggr.checkinterval</key>
+    <name>hive.groupby.mapaggr.checkinterval</name>
     <value>100000</value>
    <description>Number of rows after which the size of the grouping keys/aggregation classes is checked</description>
   </property>
   <property>
-    <key>hive.map.aggr.hash.percentmemory</key>
+    <name>hive.map.aggr.hash.percentmemory</name>
     <value>0.5</value>
     <description>Portion of total memory to be used by map-side group aggregation hash table</description>
   </property>
   <property>
-    <key>hive.mapjoin.followby.map.aggr.hash.percentmemory</key>
+    <name>hive.mapjoin.followby.map.aggr.hash.percentmemory</name>
     <value>0.3</value>
     <description>Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join</description>
   </property>
   <property>
-    <key>hive.map.aggr.hash.force.flush.memory.threshold</key>
+    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
     <value>0.9</value>
     <description>
       The max memory to be used by map-side group aggregation hash table.
@@ -1000,7 +1016,7 @@
     </description>
   </property>
   <property>
-    <key>hive.map.aggr.hash.min.reduction</key>
+    <name>hive.map.aggr.hash.min.reduction</name>
     <value>0.5</value>
     <description>
      Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. 
@@ -1008,7 +1024,7 @@
     </description>
   </property>
   <property>
-    <key>hive.multigroupby.singlereducer</key>
+    <name>hive.multigroupby.singlereducer</name>
     <value>true</value>
     <description>
      Whether to optimize a multi group by query to generate a single M/R job plan. If the multi group by query has 
@@ -1016,7 +1032,7 @@
     </description>
   </property>
   <property>
-    <key>hive.map.groupby.sorted</key>
+    <name>hive.map.groupby.sorted</name>
     <value>false</value>
     <description>
       If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform 
@@ -1025,7 +1041,7 @@
     </description>
   </property>
   <property>
-    <key>hive.map.groupby.sorted.testmode</key>
+    <name>hive.map.groupby.sorted.testmode</name>
     <value>false</value>
     <description>
       If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform 
@@ -1034,12 +1050,12 @@
     </description>
   </property>
   <property>
-    <key>hive.groupby.orderby.position.alias</key>
+    <name>hive.groupby.orderby.position.alias</name>
     <value>false</value>
     <description>Whether to enable using Column Position Alias in Group By or Order By</description>
   </property>
   <property>
-    <key>hive.new.job.grouping.set.cardinality</key>
+    <name>hive.new.job.grouping.set.cardinality</name>
     <value>30</value>
     <description>
       Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.
@@ -1054,7 +1070,7 @@
     </description>
   </property>
   <property>
-    <key>hive.udtf.auto.progress</key>
+    <name>hive.udtf.auto.progress</name>
     <value>false</value>
     <description>
       Whether Hive should automatically send progress information to TaskTracker 
@@ -1063,45 +1079,48 @@
     </description>
   </property>
   <property>
-    <key>hive.default.fileformat</key>
+    <name>hive.default.fileformat</name>
     <value>TextFile</value>
     <description>
-      Default file format for CREATE TABLE statement. 
-      Options are TextFile, SequenceFile, RCfile and ORC. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]
+      Expects one of [textfile, sequencefile, rcfile, orc].
+      Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]
     </description>
   </property>
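
Per the new "Expects one of" validation above (the lowercase allowed list alongside the TextFile
default suggests the match is case-insensitive), making ORC the site-wide default while still
allowing per-table STORED AS overrides would look like:

  <property>
    <name>hive.default.fileformat</name>
    <value>ORC</value>
  </property>
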
   <property>
-    <key>hive.query.result.fileformat</key>
+    <name>hive.query.result.fileformat</name>
     <value>TextFile</value>
-    <description>Default file format for storing result of the query. Allows TextFile, SequenceFile and RCfile</description>
+    <description>
+      Expects one of [textfile, sequencefile, rcfile].
+      Default file format for storing result of the query.
+    </description>
   </property>
   <property>
-    <key>hive.fileformat.check</key>
+    <name>hive.fileformat.check</name>
     <value>true</value>
     <description>Whether to check file format or not when loading data files</description>
   </property>
   <property>
-    <key>hive.default.rcfile.serde</key>
+    <name>hive.default.rcfile.serde</name>
     <value>org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe</value>
     <description>The default SerDe Hive will use for the RCFile format</description>
   </property>
   <property>
-    <key>hive.default.serde</key>
+    <name>hive.default.serde</name>
     <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
     <description>The default SerDe Hive will use for storage formats that do not specify a SerDe.</description>
   </property>
   <property>
-    <key>hive.serdes.using.metastore.for.schema</key>
+    <name>hive.serdes.using.metastore.for.schema</name>
     <value>org.apache.hadoop.hive.ql.io.orc.OrcSerde,org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe,org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe,org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe,org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe,org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe,org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe</value>
    <description>SerDes retrieving schema from metastore. This is an internal parameter. Check with the Hive dev team.</description>
   </property>
   <property>
-    <key>hive.querylog.location</key>
+    <name>hive.querylog.location</name>
     <value>${system:java.io.tmpdir}/${system:user.name}</value>
     <description>Location of Hive run time structured log file</description>
   </property>
   <property>
-    <key>hive.querylog.enable.plan.progress</key>
+    <name>hive.querylog.enable.plan.progress</name>
     <value>true</value>
     <description>
       Whether to log the plan's progress every time a job's progress is checked.
@@ -1109,10 +1128,11 @@
     </description>
   </property>
   <property>
-    <key>hive.querylog.plan.progress.interval</key>
-    <value>60000</value>
+    <name>hive.querylog.plan.progress.interval</name>
+    <value>60000ms</value>
     <description>
-      The interval to wait between logging the plan's progress in milliseconds.
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
+      The interval to wait between logging the plan's progress.
       If there is a whole number percentage change in the progress of the mappers or the reducers,
       the progress is logged regardless of this value.
       The actual interval will be the ceiling of (this value divided by the value of
@@ -1123,22 +1143,22 @@
     </description>
   </property>
   <property>
-    <key>hive.script.serde</key>
+    <name>hive.script.serde</name>
     <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
     <description>The default SerDe for transmitting input data to and reading output data from the user scripts. </description>
   </property>
   <property>
-    <key>hive.script.recordreader</key>
+    <name>hive.script.recordreader</name>
     <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
     <description>The default record reader for reading data from the user scripts. </description>
   </property>
   <property>
-    <key>hive.script.recordwriter</key>
+    <name>hive.script.recordwriter</name>
     <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
     <description>The default record writer for writing data to the user scripts. </description>
   </property>
   <property>
-    <key>hive.transform.escape.input</key>
+    <name>hive.transform.escape.input</name>
     <value>false</value>
     <description>
       This adds an option to escape special chars (newlines, carriage returns and
@@ -1147,7 +1167,7 @@
     </description>
   </property>
   <property>
-    <key>hive.binary.record.max.length</key>
+    <name>hive.binary.record.max.length</name>
     <value>1000</value>
     <description>
       Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. 
@@ -1155,27 +1175,27 @@
     </description>
   </property>
   <property>
-    <key>hive.hwi.listen.host</key>
+    <name>hive.hwi.listen.host</name>
     <value>0.0.0.0</value>
     <description>This is the host address the Hive Web Interface will listen on</description>
   </property>
   <property>
-    <key>hive.hwi.listen.port</key>
+    <name>hive.hwi.listen.port</name>
     <value>9999</value>
     <description>This is the port the Hive Web Interface will listen on</description>
   </property>
   <property>
-    <key>hive.hwi.war.file</key>
+    <name>hive.hwi.war.file</name>
     <value>${env:HWI_WAR_FILE}</value>
     <description>This sets the path to the HWI war file, relative to ${HIVE_HOME}. </description>
   </property>
   <property>
-    <key>hive.mapred.local.mem</key>
+    <name>hive.mapred.local.mem</name>
     <value>0</value>
     <description>mapper/reducer memory in local mode</description>
   </property>
   <property>
-    <key>hive.mapjoin.smalltable.filesize</key>
+    <name>hive.mapjoin.smalltable.filesize</name>
     <value>25000000</value>
     <description>
       The threshold for the input file size of the small tables; if the file size is smaller 
@@ -1183,22 +1203,22 @@
     </description>
   </property>
   <property>
-    <key>hive.sample.seednumber</key>
+    <name>hive.sample.seednumber</name>
     <value>0</value>
    <description>A number used for percentage sampling. By changing this number, the user will change the subsets of data sampled.</description>
   </property>
   <property>
-    <key>hive.test.mode</key>
+    <name>hive.test.mode</name>
     <value>false</value>
     <description>Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.</description>
   </property>
   <property>
-    <key>hive.test.mode.prefix</key>
+    <name>hive.test.mode.prefix</name>
     <value>test_</value>
    <description>In test mode, specifies prefixes for the output table</description>
   </property>
   <property>
-    <key>hive.test.mode.samplefreq</key>
+    <name>hive.test.mode.samplefreq</name>
     <value>32</value>
     <description>
      In test mode, specifies sampling frequency for a table that is not bucketed,
@@ -1210,42 +1230,42 @@
     </description>
   </property>
   <property>
-    <key>hive.test.mode.nosamplelist</key>
+    <name>hive.test.mode.nosamplelist</name>
     <value/>
    <description>In test mode, specifies comma separated table names to which sampling is not applied</description>
   </property>
   <property>
-    <key>hive.test.dummystats.aggregator</key>
+    <name>hive.test.dummystats.aggregator</name>
     <value/>
     <description>internal variable for test</description>
   </property>
   <property>
-    <key>hive.test.dummystats.publisher</key>
+    <name>hive.test.dummystats.publisher</name>
     <value/>
     <description>internal variable for test</description>
   </property>
   <property>
-    <key>hive.merge.mapfiles</key>
+    <name>hive.merge.mapfiles</name>
     <value>true</value>
     <description>Merge small files at the end of a map-only job</description>
   </property>
   <property>
-    <key>hive.merge.mapredfiles</key>
+    <name>hive.merge.mapredfiles</name>
     <value>false</value>
     <description>Merge small files at the end of a map-reduce job</description>
   </property>
   <property>
-    <key>hive.merge.tezfiles</key>
+    <name>hive.merge.tezfiles</name>
     <value>false</value>
     <description>Merge small files at the end of a Tez DAG</description>
   </property>
   <property>
-    <key>hive.merge.size.per.task</key>
+    <name>hive.merge.size.per.task</name>
     <value>256000000</value>
     <description>Size of merged files at the end of the job</description>
   </property>
   <property>
-    <key>hive.merge.smallfiles.avgsize</key>
+    <name>hive.merge.smallfiles.avgsize</name>
     <value>16000000</value>
     <description>
       When the average output file size of a job is less than this number, Hive will start an additional 
@@ -1254,17 +1274,17 @@
     </description>
   </property>
   <property>
-    <key>hive.merge.rcfile.block.level</key>
+    <name>hive.merge.rcfile.block.level</name>
     <value>true</value>
     <description/>
   </property>
   <property>
-    <key>hive.merge.input.format.block.level</key>
+    <name>hive.merge.input.format.block.level</name>
     <value>org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat</value>
     <description/>
   </property>
   <property>
-    <key>hive.merge.orcfile.stripe.level</key>
+    <name>hive.merge.orcfile.stripe.level</name>
     <value>true</value>
     <description>
       When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
@@ -1274,17 +1294,17 @@
     </description>
   </property>
   <property>
-    <key>hive.merge.input.format.stripe.level</key>
+    <name>hive.merge.input.format.stripe.level</name>
     <value>org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat</value>
     <description>Input file format to use for ORC stripe level merging (for internal use only)</description>
   </property>
   <property>
-    <key>hive.merge.current.job.has.dynamic.partitions</key>
+    <name>hive.merge.current.job.has.dynamic.partitions</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.exec.rcfile.use.explicit.header</key>
+    <name>hive.exec.rcfile.use.explicit.header</name>
     <value>true</value>
     <description>
       If this is set the header for RCFiles will simply be RCF.  If this is not
@@ -1293,52 +1313,52 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.rcfile.use.sync.cache</key>
+    <name>hive.exec.rcfile.use.sync.cache</name>
     <value>true</value>
     <description/>
   </property>
   <property>
-    <key>hive.io.rcfile.record.interval</key>
+    <name>hive.io.rcfile.record.interval</name>
     <value>2147483647</value>
     <description/>
   </property>
   <property>
-    <key>hive.io.rcfile.column.number.conf</key>
+    <name>hive.io.rcfile.column.number.conf</name>
     <value>0</value>
     <description/>
   </property>
   <property>
-    <key>hive.io.rcfile.tolerate.corruptions</key>
+    <name>hive.io.rcfile.tolerate.corruptions</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.io.rcfile.record.buffer.size</key>
+    <name>hive.io.rcfile.record.buffer.size</name>
     <value>4194304</value>
     <description/>
   </property>
   <property>
-    <key>hive.exec.orc.memory.pool</key>
+    <name>hive.exec.orc.memory.pool</name>
     <value>0.5</value>
     <description>Maximum fraction of heap that can be used by ORC file writers</description>
   </property>
   <property>
-    <key>hive.exec.orc.write.format</key>
+    <name>hive.exec.orc.write.format</name>
     <value/>
     <description>Define the version of the file to write</description>
   </property>
   <property>
-    <key>hive.exec.orc.default.stripe.size</key>
+    <name>hive.exec.orc.default.stripe.size</name>
     <value>67108864</value>
     <description>Define the default ORC stripe size</description>
   </property>
   <property>
-    <key>hive.exec.orc.default.block.size</key>
+    <name>hive.exec.orc.default.block.size</name>
     <value>268435456</value>
     <description>Define the default file system block size for ORC files.</description>
   </property>
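
These defaults apply when a table does not override them; individual ORC tables can carry their own values via TBLPROPERTIES. A minimal sketch (table and column names are hypothetical):

    CREATE TABLE logs_orc (line STRING)
    STORED AS ORC
    TBLPROPERTIES ("orc.stripe.size"="67108864", "orc.compress"="ZLIB");
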
   <property>
-    <key>hive.exec.orc.dictionary.key.size.threshold</key>
+    <name>hive.exec.orc.dictionary.key.size.threshold</name>
     <value>0.8</value>
     <description>
       If the number of keys in a dictionary is greater than this fraction of the total number of
@@ -1346,22 +1366,31 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.orc.default.row.index.stride</key>
+    <name>hive.exec.orc.default.row.index.stride</name>
     <value>10000</value>
     <description>Define the default ORC index stride</description>
   </property>
   <property>
-    <key>hive.exec.orc.default.buffer.size</key>
+    <name>hive.orc.row.index.stride.dictionary.check</name>
+    <value>true</value>
+    <description>
+      If enabled, the dictionary check happens after the first row index stride (default 10000 rows);
+      otherwise it happens before the first stripe is written. In both cases, the decision
+      whether to use a dictionary is retained thereafter.
+    </description>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.buffer.size</name>
     <value>262144</value>
     <description>Define the default ORC buffer size</description>
   </property>
   <property>
-    <key>hive.exec.orc.default.block.padding</key>
+    <name>hive.exec.orc.default.block.padding</name>
     <value>true</value>
     <description>Define the default block padding</description>
   </property>
   <property>
-    <key>hive.exec.orc.block.padding.tolerance</key>
+    <name>hive.exec.orc.block.padding.tolerance</name>
     <value>0.05</value>
     <description>
       Define the tolerance for block padding as a percentage of stripe size.
@@ -1371,22 +1400,22 @@
     </description>
   </property>
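
As a worked example of the tolerance: with the default 64 MB stripe (67108864 bytes) and a tolerance of 0.05, ORC may insert up to about 0.05 * 67108864 ≈ 3.4 MB of padding to align a stripe with a block boundary. Session-level overrides would look like:

    SET hive.exec.orc.default.block.padding=true;
    SET hive.exec.orc.block.padding.tolerance=0.05; -- allow ~3.4 MB padding per 64 MB stripe
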
   <property>
-    <key>hive.exec.orc.default.compress</key>
+    <name>hive.exec.orc.default.compress</name>
     <value>ZLIB</value>
     <description>Define the default compression codec for ORC file</description>
   </property>
   <property>
-    <key>hive.exec.orc.encoding.strategy</key>
+    <name>hive.exec.orc.encoding.strategy</name>
     <value>SPEED</value>
     <description>
+      Expects one of [speed, compression].
       Define the encoding strategy to use while writing data. Changing this will
       only affect the lightweight encoding for integers. This flag will not
       change the compression level of higher-level compression codecs (like ZLIB).
-      Possible options are SPEED and COMPRESSION.
     </description>
   </property>
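
For instance, to favor smaller lightweight integer encodings over write speed (the codec-level compression, e.g. ZLIB's, is unaffected):

    SET hive.exec.orc.encoding.strategy=COMPRESSION;
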
   <property>
-    <key>hive.orc.splits.include.file.footer</key>
+    <name>hive.orc.splits.include.file.footer</name>
     <value>false</value>
     <description>
       If turned on, splits generated by ORC will include metadata about the stripes in the file. This
@@ -1394,17 +1423,17 @@
     </description>
   </property>
   <property>
-    <key>hive.orc.cache.stripe.details.size</key>
+    <name>hive.orc.cache.stripe.details.size</name>
     <value>10000</value>
     <description>Cache size for keeping meta info about ORC splits in the client.</description>
   </property>
   <property>
-    <key>hive.orc.compute.splits.num.threads</key>
+    <name>hive.orc.compute.splits.num.threads</name>
     <value>10</value>
     <description>How many threads ORC should use to create splits in parallel.</description>
   </property>
   <property>
-    <key>hive.exec.orc.skip.corrupt.data</key>
+    <name>hive.exec.orc.skip.corrupt.data</name>
     <value>false</value>
     <description>
       If ORC reader encounters corrupt data, this value will be used to determine
@@ -1412,12 +1441,12 @@
     </description>
   </property>
   <property>
-    <key>hive.exec.orc.zerocopy</key>
+    <name>hive.exec.orc.zerocopy</name>
     <value>false</value>
     <description>Use zerocopy reads with ORC.</description>
   </property>
   <property>
-    <key>hive.lazysimple.extended_boolean_literal</key>
+    <name>hive.lazysimple.extended_boolean_literal</name>
     <value>false</value>
     <description>
       LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',
@@ -1427,7 +1456,7 @@
     </description>
   </property>
   <property>
-    <key>hive.optimize.skewjoin</key>
+    <name>hive.optimize.skewjoin</name>
     <value>false</value>
     <description>
       Whether to enable skew join optimization. 
@@ -1439,12 +1468,12 @@
     </description>
   </property>
   <property>
-    <key>hive.auto.convert.join</key>
+    <name>hive.auto.convert.join</name>
     <value>true</value>
     <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size</description>
   </property>
   <property>
-    <key>hive.auto.convert.join.noconditionaltask</key>
+    <name>hive.auto.convert.join.noconditionaltask</name>
     <value>true</value>
     <description>
       Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file size. 
@@ -1453,7 +1482,7 @@
     </description>
   </property>
   <property>
-    <key>hive.auto.convert.join.noconditionaltask.size</key>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
     <value>10000000</value>
     <description>
       If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. 
@@ -1462,7 +1491,7 @@
     </description>
   </property>
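
Taken together with hive.auto.convert.join above, enabling noconditionaltask lets Hive compile a join straight to a mapjoin when the small side fits under the size threshold, with no conditional backup task. A session-level sketch (values are the defaults; table names are hypothetical):

    SET hive.auto.convert.join=true;
    SET hive.auto.convert.join.noconditionaltask=true;
    SET hive.auto.convert.join.noconditionaltask.size=10000000; -- ~10 MB of small-table input
    -- a join against a small dimension table can now become a mapjoin directly
    SELECT f.id, d.label FROM fact f JOIN small_dim d ON f.dim_id = d.id;
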
   <property>
-    <key>hive.auto.convert.join.use.nonstaged</key>
+    <name>hive.auto.convert.join.use.nonstaged</name>
     <value>false</value>
     <description>
       For conditional joins, if the input stream from a small alias can be directly applied to the join operator without 
@@ -1471,7 +1500,7 @@
     </description>
   </property>
   <property>
-    <key>hive.skewjoin.key</key>
+    <name>hive.skewjoin.key</name>
     <value>100000</value>
     <description>
       Determines whether we get a skew key in join. If we see more than the specified number of rows with the same key in the join operator,
@@ -1479,7 +1508,7 @@
     </description>
   </property>
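
A runtime skew-join sketch combining the two settings above (values are the defaults):

    SET hive.optimize.skewjoin=true;
    SET hive.skewjoin.key=100000; -- a key seen in more than 100000 rows is treated as skewed
    -- skewed keys are set aside and processed by a follow-up map join job
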
   <property>
-    <key>hive.skewjoin.mapjoin.map.tasks</key>
+    <name>hive.skewjoin.mapjoin.map.tasks</name>
     <value>10000</value>
     <description>
       Determines the number of map tasks used in the follow-up map join job for a skew join.
@@ -1487,7 +1516,7 @@
     </description>
   </property>
   <property>
-    <key>hive.skewjoin.mapjoin.min.split</key>
+    <name>hive.skewjoin.mapjoin.min.split</name>
     <value>33554432</value>
     <description>
       Determines the maximum number of map tasks used in the follow-up map join job for a skew join, by specifying 
@@ -1495,27 +1524,27 @@
     </description>
   </property>
   <property>
-    <key>hive.heartbeat.interval</key>
+    <name>hive.heartbeat.interval</name>
     <value>1000</value>
     <description>Send a heartbeat after this interval - used by mapjoin and filter operators</description>
   </property>
   <property>
-    <key>hive.limit.row.max.size</key>
+    <name>hive.limit.row.max.size</name>
     <value>100000</value>
     <description>When trying a smaller subset of data for simple LIMIT, the minimum size (in bytes) each row is assumed to have.</description>
   </property>
   <property>
-    <key>hive.limit.optimize.limit.file</key>
+    <name>hive.limit.optimize.limit.file</name>
     <value>10</value>
     <description>When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample.</description>
   </property>
   <property>
-    <key>hive.limit.optimize.enable</key>
+    <name>hive.limit.optimize.enable</name>
     <value>false</value>
     <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
   </property>
   <property>
-    <key>hive.limit.optimize.fetch.max</key>
+    <name>hive.limit.optimize.fetch.max</name>
     <value>50000</value>
     <description>
       Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. 
@@ -1523,12 +1552,12 @@
     </description>
   </property>
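
A sketch of the simple-LIMIT sampling knobs above (values are the defaults; the table name is hypothetical):

    SET hive.limit.optimize.enable=true;
    SET hive.limit.optimize.limit.file=10; -- sample at most 10 input files
    SET hive.limit.row.max.size=100000;    -- assumed minimum bytes per row when sizing the sample
    SELECT * FROM events LIMIT 20;         -- may be answered from the sampled subset
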
   <property>
-    <key>hive.limit.pushdown.memory.usage</key>
+    <name>hive.limit.pushdown.memory.usage</name>
     <value>-1.0</value>
     <description>The max memory to be used for the hash in the RS (ReduceSink) operator for top-K selection.</description>
   </property>
   <property>
-    <key>hive.limit.query.max.table.partition</key>
+    <name>hive.limit.query.max.table.partition</name>
     <value>-1</value>
     <description>
       This controls how many partitions can be scanned for each partitioned table.
@@ -1536,22 +1565,22 @@
     </description>
   </property>
   <property>
-    <key>hive.hashtable.key.count.adjustment</key>
+    <name>hive.hashtable.key.count.adjustment</name>
     <value>1.0</value>
     <description>Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate of the number of keys is divided by this value. If the value is 0, statistics are not used and hive.hashtable.initialCapacity is used instead.</description>
   </property>
   <property>
-    <key>hive.hashtable.initialCapacity</key>
+    <name>hive.hashtable.initialCapacity</name>
     <value>100000</value>
     <description>Initial capacity of mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0</description>
   </property>
   <property>
-    <key>hive.hashtable.loadfactor</key>
+    <name>hive.hashtable.loadfactor</name>
     <value>0.75</value>
     <description/>
   </property>
   <property>
-    <key>hive.mapjoin.followby.gby.localtask.max.memory.usage</key>
+    <name>hive.mapjoin.followby.gby.localtask.max.memory.usage</name>
     <value>0.55</value>
     <description>
       This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table 
@@ -1560,7 +1589,7 @@
     </description>
   </property>
   <property>
-    <key>hive.mapjoin.localtask.max.memory.usage</key>
+    <name>hive.mapjoin.localtask.max.memory.usage</name>
     <value>0.9</value>
     <description>
       This number means how much memory the local task can use to hold the key/value pairs in an in-memory hash table. 
@@ -1569,37 +1598,37 @@
     </description>
   </property>
   <property>
-    <key>hive.mapjoin.check.memory.rows</key>
+    <name>hive.mapjoin.check.memory.rows</name>
     <value>100000</value>
     <description>The number of rows processed after which the memory usage is checked</description>
   </property>
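
In combination, these three settings bound the hashtable-building local task: after every hive.mapjoin.check.memory.rows rows it compares heap usage against the applicable ceiling and aborts (falling back per the plan) once the ceiling is exceeded. For example, with the defaults:

    SET hive.mapjoin.localtask.max.memory.usage=0.9;               -- plain mapjoin: abort past 90% heap
    SET hive.mapjoin.followby.gby.localtask.max.memory.usage=0.55; -- tighter cap when a group by follows
    SET hive.mapjoin.check.memory.rows=100000;                     -- check usage every 100000 rows
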
   <property>
-    <key>hive.debug.localtask</key>
+    <name>hive.debug.localtask</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.input.format</key>
+    <name>hive.input.format</name>
     <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
     <description>The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat.</description>
   </property>
   <property>
-    <key>hive.tez.input.format</key>
+    <name>hive.tez.input.format</name>
     <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
     <description>The default input format for Tez. Tez groups splits in the AM.</description>
   </property>
   <property>
-    <key>hive.tez.container.size</key>
+    <name>hive.tez.container.size</name>
     <value>-1</value>
     <description>By default Tez will spawn containers of the size of a mapper. This can be used to override that size.</description>
   </property>
   <property>
-    <key>hive.tez.java.opts</key>
+    <name>hive.tez.java.opts</name>
     <value/>
     <description>By default Tez will use the Java options from map tasks. This can be used to override them.</description>
   </property>
   <property>
-    <key>hive.tez.log.level</key>
+    <name>hive.tez.log.level</name>
     <value>INFO</value>
     <description>
       The log level to use for tasks executing as part of the DAG.
@@ -1607,17 +1636,17 @@
     </description>
   </property>
   <property>
-    <key>hive.enforce.bucketing</key>
+    <name>hive.enforce.bucketing</name>
     <value>false</value>
     <description>Whether bucketing is enforced. If true, bucketing is enforced while inserting into the table.</description>
   </property>
   <property>
-    <key>hive.enforce.sorting</key>
+    <name>hive.enforce.sorting</name>
     <value>false</value>
     <description>Whether sorting is enforced. If true, sorting is enforced while inserting into the table.</description>
   </property>
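
For example, to guarantee that inserts honor a table's bucketing and sort specification (table and column names are hypothetical):

    SET hive.enforce.bucketing=true;
    SET hive.enforce.sorting=true;
    CREATE TABLE clicks (user_id INT, ts BIGINT)
    CLUSTERED BY (user_id) SORTED BY (ts ASC) INTO 32 BUCKETS;
    INSERT OVERWRITE TABLE clicks
    SELECT user_id, ts FROM raw_clicks; -- Hive adds the reducers needed to bucket and sort
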
   <property>
-    <key>hive.optimize.bucketingsorting</key>
+    <name>hive.optimize.bucketingsorting</name>
     <value>true</value>
     <description>
       If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing 
@@ -1627,17 +1656,17 @@
     </description>
   </property>
   <property>
-    <key>hive.mapred.partitioner</key>
+    <name>hive.mapred.partitioner</name>
     <value>org.apache.hadoop.hive.ql.io.DefaultHivePartitioner</value>
     <description/>
   </property>
   <property>
-    <key>hive.enforce.sortmergebucketmapjoin</key>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
     <value>false</value>
     <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not?</description>
   </property>
   <property>
-    <key>hive.enforce.bucketmapjoin</key>
+    <name>hive.enforce.bucketmapjoin</name>
     <value>false</value>
     <description>
       If the user asked for bucketed map-side join, and it cannot be performed, 
@@ -1647,12 +1676,12 @@
     </description>
   </property>
   <property>
-    <key>hive.auto.convert.sortmerge.join</key>
+    <name>hive.auto.convert.sortmerge.join</name>
     <value>false</value>
     <description>Whether the join will be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join.</description>
   </property>
   <property>
-    <key>hive.auto.convert.sortmerge.join.bigtable.selection.policy</key>
+    <name>hive.auto.convert.sortmerge.join.bigtable.selection.policy</name>
     <value>org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ</value>
     <description>
       The policy to choose the big table for automatic conversion to sort-merge join. 
@@ -1667,7 +1696,7 @@
     </description>
   </property>
   <property>
-    <key>hive.auto.convert.sortmerge.join.to.mapjoin</key>
+    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
     <value>false</value>
     <description>
       If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, 
@@ -1681,82 +1710,82 @@
     </description>
   </property>
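
A sort-merge-bucket join sketch tying these settings together; both tables must be bucketed and sorted on the join key into the same number of buckets (table names are hypothetical):

    SET hive.auto.convert.sortmerge.join=true;
    SET hive.optimize.bucketmapjoin=true;             -- see the property further below
    SET hive.optimize.bucketmapjoin.sortedmerge=true; -- see the property further below
    SELECT b.key, s.val FROM big_bucketed b JOIN small_bucketed s ON b.key = s.key;
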
   <property>
-    <key>hive.exec.script.trust</key>
+    <name>hive.exec.script.trust</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.exec.rowoffset</key>
+    <name>hive.exec.rowoffset</name>
     <value>false</value>
     <description>Whether to provide the row offset virtual column</description>
   </property>
   <property>
-    <key>hive.hadoop.supports.splittable.combineinputformat</key>
+    <name>hive.hadoop.supports.splittable.combineinputformat</name>
     <value>false</value>
     <description/>
   </property>
   <property>
-    <key>hive.optimize.index.filter</key>
+    <name>hive.optimize.index.filter</name>
     <value>false</value>
     <description>Whether to enable automatic use of indexes</description>
   </property>
   <property>
-    <key>hive.optimize.index.autoupdate</key>
+    <name>hive.optimize.index.autoupdate</name>
     <value>false</value>
     <description>Whether to update stale indexes automatically</description>
   </property>
   <property>
-    <key>hive.optimize.ppd</key>
+    <name>hive.optimize.ppd</name>
     <value>true</value>
     <description>Whether to enable predicate pushdown</description>
   </property>
   <property>
-    <key>hive.ppd.recognizetransivity</key>
+    <name>hive.ppd.recognizetransivity</name>
     <value>true</value>
     <description>Whether to transitively replicate predicate filters over equijoin conditions.</description>
   </property>
   <property>
-    <key>hive.ppd.remove.duplicatefilters</key>
+    <name>hive.ppd.remove.duplicatefilters</name>
     <value>true</value>
     <description>Whether to remove duplicate filters produced by predicate pushdown. Ignored when hive.optimize.ppd is false.</description>
   </property>
   <property>
-    <key>hive.optimize.constant.propagation</key>
+    <name>hive.optimize.constant.propagation</name>
     <value>true</value>
     <description>Whether to enable constant propagation optimizer</description>
   </property>
   <property>
-    <key>hive.optimize.metadataonly</key>
+    <name>hive.optimize.metadataonly</name>
     <value>true</value>
     <description/>
   </property>
   <property>
-    <key>hive.optimize.null.scan</key>
+    <name>hive.optimize.null.scan</name>
     <value>true</value>
     <description>Don't scan relations that are guaranteed not to generate any rows</description>
   </property>
   <property>
-    <key>hive.optimize.ppd.storage</key>
+    <name>hive.optimize.ppd.storage</name>
     <value>true</value>
     <description>Whether to push predicates down to storage handlers</description>
   </property>
   <property>
-    <key>hive.optimize.groupby</key>
+    <name>hive.optimize.groupby</name>
     <value>true</value>
     <description>Whether to enable the bucketed group by from bucketed partitions/tables.</description>
   </property>
   <property>
-    <key>hive.optimize.bucketmapjoin</key>
+    <name>hive.optimize.bucketmapjoin</name>
     <value>false</value>
     <description>Whether to try bucket mapjoin</description>
   </property>
   <property>
-    <key>hive.optimize.bucketmapjoin.sortedmerge</key>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>false</value>
     <description>Whether to try sorted bucket merge map join</description>
   </property>
   <property>
-    <key>hive.optimize.reducededuplication</key>
+    <name>hive.optimize.reducededuplication</name>
     <value>true</value>
     <description>
       Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. 
@@ -1764,7 +1793,7 @@
     </description>
   </property>
   <property>
-    <key>hive.optimize.reducededuplication.min.reducer</key>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
     <value>4</value>
     <description>
       Reduce deduplication merges two RSs (ReduceSink operators) by moving key/parts/reducer-num of the child RS to the parent RS. 
@@ -1773,7 +1802,7 @@
     </description>
   </property>
   <property>
-    <key>hive.optimize.sort.dynamic.partition</key>
+    <name>hive.optimize.sort.dynamic.partition</name>
     <value>true</value>
     <description>
       When enabled, the dynamic partitioning column will be globally sorted.
@@ -1782,22 +1811,25 @@
     </description>
   </property>
   <property>
-    <key>hive.optimize.sampling.orderby</key>
+    <name>hive.optimize.sampling.orderby</name>
     <value>false</value>
-    <description/>
+    <description>Uses sampling on order-by clause for parallel execution.</description>
   </property>
   <property>
-    <key>hive.optimize.sampling.orderby.number</key>
+    <name>hive.optimize.sampling.orderby.number</name>
     <value>1000</value>
-    <description/>
+    <description>Total number of samples to be obtained.</description>
   </property>
   <property>
-    <key>hive.optimize.sampling.orderby.percent</key>
+    <name>hive.optimize.sampling.orderby.percent</name>
     <value>0.1</value>
-    <description/>
+    <description>
+      Expects value between 0.0f and 1.0f.
+      Probability with which a row will be chosen.
+    </description>
   </property>
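
Together these let a global ORDER BY run with parallel reducers by first sampling the key distribution (values are the defaults; the table name is hypothetical):

    SET hive.optimize.sampling.orderby=true;
    SET hive.optimize.sampling.orderby.number=1000; -- total samples to obtain
    SET hive.optimize.sampling.orderby.percent=0.1; -- probability a row is chosen
    SELECT * FROM sales ORDER BY amount;
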
   <property>
-    <key>hive.optimize.union.remove</key>
+    <name>hive.optimize.union.remove</name>
     <value>false</value>
     <description>
       Whether to remove the union and push the operators between union and the filesink above union. 
@@ -1812,12 +1844,12 @@
     </description>
   </property>
   <property>
-    <key>hive.optimize.correlation</key>
+    <name>hive.optimize.correlation</name>
     <value>false</value>
     <description>Exploit intra-query correlations.</description>
   </property>
   <property>
-    <key>hive.mapred.supports.subdirectories</key>
+    <name>hive.mapred.supports.subdirectories</name>
     <value>false</value>
     <description>
       Whether the version of Hadoop which is running supports sub-directories for tables/partitions. 
@@ -1826,7 +1858,7 @@
     </description>
   </property>
   <property>
-    <key>hive.optimize.skewjoin.compiletime</key>
+    <name>hive.optimize.skewjoin.compiletime</name>
     <value>false</value>
     <description>
       Whether to create a separate plan for skewed keys for the tables in the join.
@@ -1847,72 +1879,78 @@
     </description>
   </property>
   <property>
-    <key>hive.optimize.index.filter.compact.minsize</key>
+    <name>hive.optimize.index.filter.compact.minsize</name>
     <value>5368709120</value>
     <description>Minimum size (in bytes) of the inputs on which a compact index is automatically used.</description>
   </property>
   <property>
-    <key>hive.optimize.index.filter.compact.maxsize</key>
+    <name>hive.optimize.index.filter.compact.maxsize</name>
     <value>-1</value>
     <description>Maximum size (in bytes) of the inputs on which a compact index is automatically used.  A negative number is equivalent to infinity.</description>
   </property>
   <property>
-    <key>hive.index.compact.query.max.entries</key>
+    <name>hive.index.compact.query.max.entries</name>
     <value>10000000</value>
     <description>The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity.</description>
   </property>
   <property>
-    <key>hive.index.compact.query.max.size</key>
+    <name>hive.index.compact.query.max.size</name>
     <value>10737418240</value>
     <description>The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity.</description>
   </property>
   <property>
-    <key>hive.index.compact.binary.search</key>
+    <name>hive.index.compact.binary.search</name>
     <value>true</value>
     <description>Whether or not to use a binary search to find the entries in an index table that match the filter, where possible</description>
   </property>
   <property>
-    <key>hive.stats.autogather</key>
+    <name>hive.stats.autogather</name>
     <value>true</value>
     <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
   </property>
   <property>
-    <key>hive.stats.dbclass</key>
+    <name>hive.stats.dbclass</name>
     <value>fs</value>
-    <description>The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.</description>
+    <description>
+      Expects one of the patterns in [jdbc(:.*), hbase, counter, custom, fs].
+      The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.
+    </description>
   </property>
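
For example, to publish temporary statistics as counters instead of using the default file-system store:

    SET hive.stats.dbclass=counter;
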
   <property>
-    <key>hive.stats.jdbcdriver</key>
+    <name>hive.stats.jdbcdriver</name>
     <value>org.apache.derby.jdbc.EmbeddedDriver</value>
     <description>The JDBC driver for the database that stores temporary Hive statistics.</description>
   </property>
   <property>
-    <key>hive.stats.dbconnectionstring</key>
+    <name>hive.stats.dbconnectionstring</name>
     <value>jdbc:derby:;databaseName=TempStatsStore;create=true</value>
     <description>The default connection string for the database that stores temporary Hive statistics.</description>
   </property>
   <property>
-    <key>hive.stats.default.publisher</key>
+    <name>hive.stats.default.publisher</name>
     <value/>
     <description>The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type.</description>
   </property>
   <property>
-    <key>hive.stats.default.aggregator</key>
+    <name>hive.stats.default.aggregator</name>
     <value/>
     <description>The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type.</description>
   </property>
   <property>
-    <key>hive.stats.jdbc.timeout</key>
-    <value>30</value>
-    <description>Timeout value (number of seconds) used by JDBC connection and statements.</description>
+    <name>hive.stats.jdbc.timeout</name>
+    <value>30s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Timeout value used by JDBC connection and statements.
+    </description>
   </property>
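
Since the value now carries a unit suffix, equivalent spellings include:

    SET hive.stats.jdbc.timeout=30s;     -- the default
    SET hive.stats.jdbc.timeout=30000ms; -- the same duration in milliseconds
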
   <property>
-    <key>hive.stats.atomic</key>
+    <name>hive.stats.atomic</name>
     <value>false</value>
     <description>Whether to update metastore stats only if all stats are available</description>
   </property>
   <property>
-    <key>hive.stats.retries.max</key>
+    <name>hive.stats.retries.max</name>
     <value>0</value>
     <description>
       Maximum number of retries when the stats publisher/aggregator gets an exception while updating the intermediate database. 
@@ -1920,17 +1958,20 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.retries.wait</key>
-    <value>3000</value>
-    <description>The base waiting window (in milliseconds) before the next retry. The actual wait time is calculated by baseWindow * failures baseWindow * (failure  1) * (random number between [0.0,1.0]).</description>
+    <name>hive.stats.retries.wait</name>
+    <value>3000ms</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is msec if not specified.
+      The base waiting window before the next retry. The actual wait time is calculated by baseWindow * failures + baseWindow * (failures + 1) * (random number between [0.0,1.0]).
+    </description>
   </property>
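
As a worked example (assuming the additive reading of the formula above): with the default 3000ms base window and two previous failures, the next wait is 3000 * 2 + 3000 * 3 * r, i.e. between 6000 ms and 15000 ms for r in [0.0, 1.0]. Illustrative settings:

    SET hive.stats.retries.max=3;       -- the default of 0 disables retries
    SET hive.stats.retries.wait=3000ms; -- base window; msec is assumed if no unit is given
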
   <property>
-    <key>hive.stats.collect.rawdatasize</key>
+    <name>hive.stats.collect.rawdatasize</name>
     <value>true</value>
     <description>Should the raw data size be collected when analyzing tables?</description>
   </property>
   <property>
-    <key>hive.client.stats.counters</key>
+    <name>hive.client.stats.counters</name>
     <value/>
     <description>
       Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). 
@@ -1938,7 +1979,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.reliable</key>
+    <name>hive.stats.reliable</name>
     <value>false</value>
     <description>
       Whether queries will fail because stats cannot be collected completely accurately. 
@@ -1947,12 +1988,12 @@
     </description>
   </property>
   <property>
-    <key>hive.analyze.stmt.collect.partlevel.stats</key>
+    <name>hive.analyze.stmt.collect.partlevel.stats</name>
     <value>true</value>
     <description>Queries like 'analyze table T compute statistics for columns' should compute partition-level stats for partitioned tables even when no partition spec is specified.</description>
   </property>
   <property>
-    <key>hive.stats.gather.num.threads</key>
+    <name>hive.stats.gather.num.threads</name>
     <value>10</value>
     <description>
       Number of threads used by partialscan/noscan analyze command for partitioned tables.
@@ -1960,7 +2001,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.collect.tablekeys</key>
+    <name>hive.stats.collect.tablekeys</name>
     <value>false</value>
     <description>
       Whether join and group by keys on tables are derived and maintained in the QueryPlan.
@@ -1968,7 +2009,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.collect.scancols</key>
+    <name>hive.stats.collect.scancols</name>
     <value>false</value>
     <description>
       Whether column accesses are tracked in the QueryPlan.
@@ -1976,7 +2017,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.ndv.error</key>
+    <name>hive.stats.ndv.error</name>
     <value>20.0</value>
     <description>
       Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. 
@@ -1984,7 +2025,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.key.prefix.max.length</key>
+    <name>hive.stats.key.prefix.max.length</name>
     <value>150</value>
     <description>
       Determines if when the prefix of the key used for intermediate stats collection
@@ -1992,7 +2033,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.key.prefix.reserve.length</key>
+    <name>hive.stats.key.prefix.reserve.length</name>
     <value>24</value>
     <description>
       Reserved length for postfix of stats key. Currently only meaningful for counter type which should
@@ -2001,7 +2042,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.max.variable.length</key>
+    <name>hive.stats.max.variable.length</name>
     <value>100</value>
     <description>
       To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),
@@ -2013,7 +2054,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.list.num.entries</key>
+    <name>hive.stats.list.num.entries</name>
     <value>10</value>
     <description>
       To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),
@@ -2024,7 +2065,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.map.num.entries</key>
+    <name>hive.stats.map.num.entries</name>
     <value>10</value>
     <description>
       To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),
@@ -2035,7 +2076,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.map.parallelism</key>
+    <name>hive.stats.map.parallelism</name>
     <value>1</value>
     <description>
       Hive/Tez optimizer estimates the data size flowing through each of the operators.
@@ -2046,7 +2087,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.fetch.partition.stats</key>
+    <name>hive.stats.fetch.partition.stats</name>
     <value>true</value>
     <description>
       Annotation of operator tree with statistics information requires partition level basic
@@ -2058,7 +2099,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.fetch.column.stats</key>
+    <name>hive.stats.fetch.column.stats</name>
     <value>false</value>
     <description>
       Annotation of operator tree with statistics information requires column statistics.
@@ -2068,7 +2109,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.join.factor</key>
+    <name>hive.stats.join.factor</name>
     <value>1.1</value>
     <description>
       Hive/Tez optimizer estimates the data size flowing through each of the operators. JOIN operator
@@ -2078,7 +2119,7 @@
     </description>
   </property>
   <property>
-    <key>hive.stats.deserialization.factor</key>
+    <name>hive.stats.deserialization.factor</name>
     <value>1.0</value>
     <description>
       Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence
@@ -2089,7 +2130,7 @@
     </description>
   </property>
   <property>
-    <key>hive.support.concurrency</key>
+    <name>hive.support.concurrency</name>
     <value>false</value>
     <description>
       Whether Hive supports concurrency control or not. 
@@ -2097,27 +2138,30 @@
     </description>
   </property>
   <property>
-    <key>hive.lock.manager</key>
+    <name>hive.lock.manager</name>
     <value>org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager</value>
     <description/>
   </property>
   <property>
-    <key>hive.lock.numretries</key>
+    <name>hive.lock.numretries</name>
     <value>100</value>
     <description>The number of times you want to try to get all the locks</description>
   </property>
   <property>
-    <key>hive.unlock.numretries</key>
+    <name>hive.unlock.numretries</name>
     <value>10</value>
     <description>The number of times you want to retry a single unlock operation</description>
   </property>
   <property>
-    <key>hive.lock.sleep.between.retries</key>
-    <value>60</value>

[... 1157 lines stripped ...]