Posted to commits@ambari.apache.org by dm...@apache.org on 2014/02/03 21:58:07 UTC

git commit: AMBARI-4505. HCat, Tez client install fails (Eugene Chekanskiy via dlysnichenko)

Updated Branches:
  refs/heads/trunk b31639e72 -> 8fb468378


AMBARI-4505. HCat, Tez client install fails (Eugene Chekanskiy via dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8fb46837
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8fb46837
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8fb46837

Branch: refs/heads/trunk
Commit: 8fb4683780319704457e758cc2d55c6d247302c3
Parents: b31639e
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Mon Feb 3 22:57:05 2014 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Mon Feb 3 22:57:05 2014 +0200

----------------------------------------------------------------------
 .../hooks/before-INSTALL/scripts/params.py      |   3 +-
 .../scripts/shared_initialization.py            |   6 +-
 .../HDP/2.0.6/services/HIVE/package/files/hcat  |  13 +
 .../2.0.6/services/HIVE/package/scripts/hcat.py |   5 +
 .../services/HIVE/package/scripts/params.py     |  10 +-
 .../services/WEBHCAT/package/scripts/params.py  |   7 +-
 .../WEBHCAT/package/scripts/webhcat_service.py  |   2 +-
 .../HIVE/configuration/hive-exec-log4j.xml      | 122 ++++++++
 .../services/HIVE/configuration/hive-log4j.xml  | 130 +++++++++
 .../services/HIVE/configuration/hive-site.xml   | 285 +++++++++++++++++++
 .../stacks/HDP/2.1.1/services/HIVE/metainfo.xml |  99 +++++++
 .../2.1.1/services/TEZ/package/scripts/tez.py   |   3 +-
 .../WEBHCAT/configuration/webhcat-site.xml      | 126 ++++++++
 .../HDP/2.1.1/services/WEBHCAT/metainfo.xml     |  49 ++++
 .../stacks/2.0.6/HIVE/test_hcat_client.py       |  10 +-
 .../hooks/before-INSTALL/test_before_install.py |   3 +
 .../python/stacks/2.1.1/TEZ/test_tez_client.py  |   3 +-
 17 files changed, 866 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
index dc6d770..bdd598e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
@@ -38,6 +38,7 @@ zk_user = config['configurations']['global']['zk_user']
 gmetad_user = config['configurations']['global']["gmetad_user"]
 gmond_user = config['configurations']['global']["gmond_user"]
 storm_user = config['configurations']['global']['storm_user']
+tez_user = default('/configurations/global/tez_user', 'tez')
 
 user_group = config['configurations']['global']['user_group']
 proxyuser_group =  config['configurations']['global']['proxyuser_group']
@@ -81,4 +82,4 @@ is_slave = hostname in slave_hosts
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
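
The new tez_user line above uses the resource_management default() helper rather than direct dictionary indexing, so an install against a cluster config that predates the tez_user property falls back to 'tez' instead of failing with a KeyError. A minimal sketch of that lookup pattern (illustrative only, not Ambari's actual implementation):

    # Illustrative re-implementation of the default() helper used above;
    # the real one lives in resource_management and reads the global config.
    def default(path, default_value, config=None):
        """Walk a '/'-separated path through nested dicts, returning
        default_value if any key along the way is missing."""
        node = config or {}
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return default_value
            node = node[key]
        return node

    config = {'configurations': {'global': {'storm_user': 'storm'}}}
    # 'tez_user' is absent from this config, so the fallback applies:
    assert default('/configurations/global/tez_user', 'tez', config) == 'tez'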

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
index cf6c2c5..c5688b0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -34,6 +34,10 @@ def setup_users():
        gid=params.user_group,
        groups=[params.proxyuser_group]
   )
+  User(params.tez_user,
+      gid=params.user_group,
+      groups=[params.proxyuser_group]
+  )
   smoke_user_dirs = format(
     "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
   set_uid(params.smoke_user, smoke_user_dirs)
@@ -110,4 +114,4 @@ def set_uid(user, user_dirs):
 def install_packages():
   Package("unzip")
   Package("net-snmp")
-  Package("net-snmp-utils")
\ No newline at end of file
+  Package("net-snmp-utils")
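
The tez account is declared with the same User resource pattern as the other service accounts, and the provider converges the host to the declared state. Roughly equivalent imperative logic, as a sketch (the real provider in resource_management covers more edge cases):

    # Rough imperative equivalent of the declarative User resource above.
    import pwd
    import subprocess

    def ensure_user(name, gid, groups):
        """Create the account if missing, otherwise align its groups."""
        try:
            pwd.getpwnam(name)  # raises KeyError if the user does not exist
            cmd = ['usermod', '-g', gid, '-G', ','.join(groups), name]
        except KeyError:
            cmd = ['useradd', '-g', gid, '-G', ','.join(groups), name]
        subprocess.check_call(cmd)

    # Mirrors User(params.tez_user, gid=params.user_group,
    #              groups=[params.proxyuser_group]):
    # ensure_user('tez', 'hadoop', ['users'])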

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcat
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcat b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcat
new file mode 100644
index 0000000..e579a1c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcat
@@ -0,0 +1,13 @@
+#!/bin/sh
+. /etc/default/hadoop
+
+# Instead of bigtop-detect-javahome
+. /etc/hive-hcatalog/conf/hcat-env.sh
+
+# FIXME: HCATALOG-636 (and also HIVE-2757)
+export HIVE_HOME=/usr/lib/hive
+export HIVE_CONF_DIR=/etc/hive/conf
+export HCAT_HOME=/usr/lib/hive-hcatalog
+
+export HCATALOG_HOME=/usr/lib/hive-hcatalog
+exec /usr/lib/hive-hcatalog/bin/hcat "$@"

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
index 2993d3a..56c3110 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
@@ -35,6 +35,11 @@ def hcat():
             recursive=True
   )
 
+  File("/usr/bin/hcat",
+       mode=0755,
+       content=StaticFile('hcat')
+  )
+
   hcat_TemplateConfig('hcat-env.sh')
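
The added File resource installs the packaged hcat wrapper (the shell script above) as /usr/bin/hcat with mode 0755, keeping the command on PATH even though HDP 2.1.1 relocates HCatalog under /usr/lib/hive-hcatalog. A minimal stand-in for what the resource does at converge time (paths are from the diff; the copy logic is illustrative):

    # Illustrative equivalent of
    # File("/usr/bin/hcat", mode=0755, content=StaticFile('hcat')).
    import os
    import shutil

    def install_static_file(src, dest, mode):
        """Copy a file shipped in the service package to a system path."""
        shutil.copyfile(src, dest)
        os.chmod(dest, mode)

    # StaticFile('hcat') would resolve inside the package's files/ dir:
    # install_static_file('files/hcat', '/usr/bin/hcat', 0o755)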
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
index 9803b35..4c2a335 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
@@ -106,11 +106,15 @@ mysql_host = config['clusterHostInfo']['hive_mysql_host']
 mysql_adduser_path = "/tmp/addMysqlUser.sh"
 
 ########## HCAT
+if config['hostLevelParams']['stack_version'] == '2.1.1':
+  hcat_conf_dir = '/etc/hive-hcatalog/conf'
+  hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+else:
+  hcat_conf_dir = '/etc/hcatalog/conf'
+  hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
 
-hcat_conf_dir = '/etc/hcatalog/conf'
 
 metastore_port = 9933
-hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
 
 hcat_dbroot = hcat_lib
 
@@ -134,4 +138,4 @@ if ('hive-exec-log4j' in config['configurations']):
 else:
   log4j_exec_props = None
 
-daemon_name = status_params.daemon_name
\ No newline at end of file
+daemon_name = status_params.daemon_name

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
index 83211e1..08a01a4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
@@ -29,7 +29,12 @@ config = Script.get_config()
 webhcat_user = config['configurations']['global']['webhcat_user']
 download_url = config['configurations']['global']['apache_artifacts_download_url']
 
-config_dir = '/etc/hcatalog/conf'
+if config['hostLevelParams']['stack_version'] == '2.1.1':
+  config_dir = '/etc/hive-webhcat/conf'
+  webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+else:
+  config_dir = '/etc/hcatalog/conf'
+  webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
 
 templeton_log_dir = config['configurations']['global']['hcat_log_dir']
 templeton_pid_dir = status_params.templeton_pid_dir
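
Both the HIVE and WEBHCAT params modules now branch on stack_version, because HDP 2.1.1 renames the HCatalog packages (hcatalog becomes hive-hcatalog / hive-webhcat) and relocates their config and binary directories. The same selection can be read as a lookup table; a sketch assuming only the two layouts that appear in this diff:

    # Sketch: the per-stack path selection expressed as a lookup table.
    # Only the two layouts present in this commit are assumed to exist.
    HCAT_LAYOUTS = {
        '2.1.1': {'hcat_conf': '/etc/hive-hcatalog/conf',
                  'hcat_lib': '/usr/lib/hive-hcatalog/share/hcatalog',
                  'webhcat_conf': '/etc/hive-webhcat/conf',
                  'webhcat_bin': '/usr/lib/hive-hcatalog/sbin'},
        'default': {'hcat_conf': '/etc/hcatalog/conf',
                    'hcat_lib': '/usr/lib/hcatalog/share/hcatalog',
                    'webhcat_conf': '/etc/hcatalog/conf',
                    'webhcat_bin': '/usr/lib/hcatalog/sbin'},
    }

    def hcat_paths(stack_version):
        return HCAT_LAYOUTS.get(stack_version, HCAT_LAYOUTS['default'])

    assert hcat_paths('2.1.1')['hcat_conf'] == '/etc/hive-hcatalog/conf'
    assert hcat_paths('2.0.6')['webhcat_bin'] == '/usr/lib/hcatalog/sbin'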

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py
index 12c3854..4b86338 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py
@@ -24,7 +24,7 @@ from resource_management import *
 def webhcat_service(action='start'):
   import params
 
-  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
+  cmd = format('env HADOOP_HOME={hadoop_home} {webhcat_bin_dir}/webhcat_server.sh')
 
   if action == 'start':
     demon_cmd = format('{cmd} start')
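
The start command now interpolates {webhcat_bin_dir} from params instead of hard-coding /usr/lib/hcatalog/sbin. resource_management's format() resolves such placeholders against names visible to the caller; a simplified sketch of the behavior (the real helper also consults the caller's frame, which this version replaces with an explicit dict):

    # Simplified sketch of placeholder resolution like format() above.
    def format_cmd(template, **scope):
        return template.format(**scope)

    params = {'hadoop_home': '/usr',
              'webhcat_bin_dir': '/usr/lib/hive-hcatalog/sbin'}
    cmd = format_cmd('env HADOOP_HOME={hadoop_home} '
                     '{webhcat_bin_dir}/webhcat_server.sh', **params)
    assert cmd == ('env HADOOP_HOME=/usr '
                   '/usr/lib/hive-hcatalog/sbin/webhcat_server.sh')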

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
new file mode 100644
index 0000000..b0f5268
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-exec-log4j.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>INFO,FA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>${hive.query.id}.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshhold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA</name>
+    <value>org.apache.log4j.FileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,FA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
new file mode 100644
index 0000000..e92c3e8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-log4j.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>WARN,DRFA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>hive.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.encoding</name>
+    <value>UTF-8</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,DRFA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..bfdc8ac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,285 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used as the Hive Metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value> </value>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value></value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value></value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+     thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value></value>
+    <description>The service principal for the metastore thrift server. The special
+    string _HOST will be replaced automatically with the correct host name.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.semantic.analyzer.factory.impl</name>
+    <value>org.apache.hivealog.cli.HCatSemanticAnalyzerFactory</value>
+    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In insecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable local filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>true</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+    of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common
+      join into a map join based on the input file size.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
+      the criteria for a sort-merge join.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) join to a map-join SMB join.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common join into a map join based on the input file
+      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
+      specified size, the join is directly converted to a map join (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
+      converted to a map join (there is no conditional task). The default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>1</value>
+    <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
+      That means that if the reducer count of the child RS is fixed (order by or forced bucketing) and small, the merge can produce a very slow, single-reducer MR job.
+      The optimization is disabled if the number of reducers is less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      effect. If it is on, and if there are map-join jobs followed by a map-reduce
+      job (e.g., a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
+      will use 10 reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>false</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>
+    Whether to enable automatic use of indexes
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..5aba96f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
@@ -0,0 +1,99 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.12.0.2.1.1</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>HCATALOG</name>
+      <comment>This is a comment for the HCATALOG service</comment>
+      <version>0.12.0.2.0.6.0</version>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive-hcatalog</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/TEZ/package/scripts/tez.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/TEZ/package/scripts/tez.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/TEZ/package/scripts/tez.py
index 42ac9f9..b23a356 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/TEZ/package/scripts/tez.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/TEZ/package/scripts/tez.py
@@ -27,7 +27,8 @@ def tez():
 
   Directory( params.conf_dir,
     owner = params.tez_user,
-    group = params.user_group
+    group = params.user_group,
+    recursive = True
   )
 
   XmlConfig( "tez-site.xml",
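
recursive=True gives the Directory resource mkdir -p semantics, which matters on a fresh client host where /etc/tez may not exist before the conf dir is created. A bare-bones equivalent, as a sketch (ownership handling in the real provider is richer):

    # Bare-bones equivalent of Directory(conf_dir, owner=..., group=...,
    # recursive=True): mkdir -p, then chown the leaf directory.
    import grp
    import os
    import pwd

    def ensure_directory(path, owner, group):
        if not os.path.isdir(path):
            os.makedirs(path)  # creates missing parents, like mkdir -p
        os.chown(path, pwd.getpwnam(owner).pw_uid,
                 grp.getgrnam(group).gr_gid)

    # ensure_directory('/etc/tez/conf', 'tez', 'hadoop')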

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
new file mode 100644
index 0000000..39b901e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration>
+
+  <property>
+    <name>templeton.port</name>
+    <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <value>/usr/lib/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.hadoop</name>
+    <value>/usr/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.archive</name>
+    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
+    <description>The path to the Pig archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat</name>
+    <value>/usr/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.archive</name>
+    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.properties</name>
+    <value></value>
+    <description>Properties to set when running hive.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma separated host:port pairs</description>
+  </property>
+
+  <property>
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+  </property>
+
+  <property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>
+      Enable the override path in templeton.override.jars
+    </description>
+  </property>
+
+  <property>
+    <name>templeton.streaming.jar</name>
+    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
+    <description>The hdfs path to the Hadoop streaming jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Timeout for the Templeton API.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
new file mode 100644
index 0000000..536689f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>WEBHCAT</name>
+      <comment>This is a comment for the WEBHCAT service</comment>
+      <version>0.12.0.2.1.1</version>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive-webhcat</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <configuration-dependencies>
+        <config-type>webhcat-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
index 8366d56..ec6d92d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
@@ -37,6 +37,10 @@ class TestHcatClient(RMFTestCase):
       owner = 'hcat',
       recursive = True,
     )
+    self.assertResourceCalled('File',"/usr/bin/hcat",
+      mode=0755,
+      content=StaticFile('hcat')
+    )
     self.assertResourceCalled('TemplateConfig', '/etc/hcatalog/conf/hcat-env.sh',
       owner = 'hcat',
       group = 'hadoop',
@@ -60,8 +64,12 @@ class TestHcatClient(RMFTestCase):
       owner = 'hcat',
       recursive = True,
     )
+    self.assertResourceCalled('File',"/usr/bin/hcat",
+      mode=0755,
+      content=StaticFile('hcat')
+    )
     self.assertResourceCalled('TemplateConfig', '/etc/hcatalog/conf/hcat-env.sh',
       owner = 'hcat',
       group = 'hadoop',
     )
-    self.assertNoMoreResources()
\ No newline at end of file
+    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
index 765c0b2..213da63 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
@@ -34,6 +34,9 @@ class TestHookBeforeInstall(RMFTestCase):
     self.assertResourceCalled('User', 'ambari-qa',
                           gid='hadoop',
                           groups=['users'], )
+    self.assertResourceCalled('User', 'tez',
+                              gid='hadoop',
+                              groups=['users'], )
     self.assertResourceCalled('File', '/tmp/changeUid.sh',
                           content=StaticFile('changeToSecureUid.sh'),
                           mode=0555, )
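
The updated tests follow the RMFTestCase pattern: each declared resource is asserted in execution order, and assertNoMoreResources() closes the sequence. A minimal sketch of such an ordered-assertion harness (class and method names here are hypothetical, not Ambari's actual test base):

    # Minimal ordered-assertion harness in the spirit of RMFTestCase;
    # the names are hypothetical.
    class ResourceLog(object):
        def __init__(self):
            self.calls = []  # (resource_type, name, kwargs) in call order

        def record(self, rtype, name, **kwargs):
            self.calls.append((rtype, name, kwargs))

        def assert_resource_called(self, rtype, name, **kwargs):
            actual = self.calls.pop(0)
            assert actual == (rtype, name, kwargs), actual

        def assert_no_more_resources(self):
            assert not self.calls, self.calls

    log = ResourceLog()
    log.record('User', 'tez', gid='hadoop', groups=['users'])
    log.assert_resource_called('User', 'tez', gid='hadoop', groups=['users'])
    log.assert_no_more_resources()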

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fb46837/ambari-server/src/test/python/stacks/2.1.1/TEZ/test_tez_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1.1/TEZ/test_tez_client.py b/ambari-server/src/test/python/stacks/2.1.1/TEZ/test_tez_client.py
index 21a3700..05224fc 100644
--- a/ambari-server/src/test/python/stacks/2.1.1/TEZ/test_tez_client.py
+++ b/ambari-server/src/test/python/stacks/2.1.1/TEZ/test_tez_client.py
@@ -31,7 +31,8 @@ class TestTezClient(RMFTestCase):
 
     self.assertResourceCalled('Directory', '/etc/tez/conf',
       owner = 'tez',
-      group = 'hadoop'
+      group = 'hadoop',
+      recursive = True
     )
 
     self.assertResourceCalled('XmlConfig', 'tez-site.xml',