Posted to commits@ambari.apache.org by ao...@apache.org on 2013/12/30 14:01:17 UTC

[17/23] AMBARI-4194. Enable HDP1 Res. management stack (aonishuk)

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..5112e99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hcat_service_check():
+    import params
+
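+    # Build a unique warehouse path for this run, plus the HDFS existence
+    # check used later to verify the smoke-test output.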
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
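+    # Stage the smoke-test script shipped with the service package and make
+    # it executable.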
+    File('/tmp/hcatSmoke.sh',
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir)
+
+    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True
+    )
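
The check thus reduces to three shell steps; roughly (assuming the default
warehouse and Hadoop conf locations shown above, and that ExecuteHadoop runs
the hadoop CLI against the given conf dir):

    sh /tmp/hcatSmoke.sh hcatsmoke<id> prepare
    hadoop --config /etc/hadoop/conf fs -test -e /apps/hive/warehouse/hcatsmoke<id>
    sh /tmp/hcatSmoke.sh hcatsmoke<id> cleanup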

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000..b37ebb2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hive(name=None):
+  import params
+
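+  # Server components (metastore, HiveServer2) use the protected server conf
+  # dir with 0600 config files and need the JDBC driver in place; clients get
+  # world-readable configs in the default conf dir.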
+  if name == 'metastore' or name == 'hiveserver2':
+    hive_config_dir = params.hive_server_conf_dir
+    config_file_mode = 0600
+    jdbc_connector()
+  else:
+    hive_config_dir = params.hive_conf_dir
+    config_file_mode = 0644
+
+  Directory(hive_config_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=config_file_mode
+  )
+
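+  # Fetch the DB connection check utility from the jdk_location URL on the
+  # Ambari server into the agent's directory, retrying up to 5 times.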
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar} ]"))
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=StaticFile('startHiveserver2.sh')
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+  )
+
+  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
+  crt_file(format("{hive_conf_dir}/hive-exec-log4j.properties.template"))
+  crt_file(format("{hive_conf_dir}/hive-log4j.properties.template"))
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
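+  # For MySQL the connector jar is expected to already be installed under
+  # /usr/share/java; for Oracle the driver is first downloaded from the server.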
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    cmd = format(
+      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..0a5fb2b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..c741174
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Verify that the process recorded in the metastore pid file is alive
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000..3ad81a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='hiveserver2')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'hiveserver2',
+                  action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'hiveserver2',
+                  action = 'stop'
+    )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    # Verify that the process recorded in the HiveServer2 pid file is alive
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py
new file mode 100644
index 0000000..e8d4e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hive_service(
+    name,
+    action='start'):
+
+  import params
+
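+  # Both daemons are launched through the wrapper scripts staged by hive.py;
+  # the arguments are: stdout file, log file, pid file, server conf dir.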
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format(
+      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format(
+      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
+
+  if action == 'start':
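+    # Skip the start if the pid file exists and the recorded process is alive.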
+    demon_cmd = format("{cmd}")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(demon_cmd,
+            user=params.hive_user,
+            not_if=no_op_test
+    )
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
+
+  elif action == 'stop':
+    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
+    Execute(demon_cmd)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000..5360f99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from mysql_service import mysql_service
+
+class MysqlServer(Script):
+
+  if System.get_instance().platform == "suse":
+    daemon_name = 'mysql'
+  else:
+    daemon_name = 'mysqld'
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
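+    # Bring MySQL up just long enough to register the Hive metastore user,
+    # then stop it again; start()/stop() manage the daemon afterwards.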
+    mysql_service(daemon_name=self.daemon_name, action='start')
+
+    File(params.mysql_adduser_path,
+         mode=0755,
+         content=StaticFile('addMysqlUser.sh')
+    )
+
+    # Build the command as a tuple so Execute auto-escapes each argument.
+    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
+           params.hive_metastore_user_name, params.hive_metastore_user_passwd, params.mysql_host[0])
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+    mysql_service(daemon_name=self.daemon_name, action='stop')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'stop')
+
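+  # A non-zero exit from `service <daemon_name> status` makes Execute raise,
+  # which is how a stopped server is reported.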
+  def status(self, env):
+    mysql_service(daemon_name=self.daemon_name, action = 'status')
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py
new file mode 100644
index 0000000..4716343
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'):
+
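+  # Thin wrapper around the platform init script; status output is not
+  # logged since the status action is invoked on every poll.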
+  logoutput = True
+  if action == 'start':
+    cmd = format('service {daemon_name} start')
+  elif action == 'stop':
+    cmd = format('service {daemon_name} stop')
+  elif action == 'status':
+    cmd = format('service {daemon_name} status')
+    logoutput = False
+  else:
+    cmd = None
+
+  if cmd is not None:
+    Execute(cmd,
+            path="/usr/local/bin/:/bin/:/sbin/",
+            tries=1,
+            logoutput=logoutput)
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000..38bb517
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_server_conf_dir = "/etc/hive/conf.server"
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+#users
+hive_user = config['configurations']['global']['hive_user']
+hive_lib = '/usr/lib/hive/lib/'
+#JDBC driver jar name
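+# default() falls back to its second argument when the key is absent from the
+# command configuration.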
+hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
+if hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc6.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+#common
+hive_metastore_port = config['configurations']['global']['hive_metastore_port']
+hive_var_lib = '/var/lib/hive'
+hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
+hive_url = format("jdbc:hive2://{hive_server_host}:10000")
+
+smokeuser = config['configurations']['global']['smokeuser']
+smoke_test_sql = "/tmp/hiveserver2.sql"
+smoke_test_path = "/tmp/hiveserver2Smoke.sh"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+#hive_env
+hive_conf_dir = "/etc/hive/conf"
+hive_dbroot = config['configurations']['global']['hive_dbroot']
+hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#hive-site
+hive_database_name = config['configurations']['global']['hive_database_name']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh'
+
+hadoop_home = '/usr'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user =  config['configurations']['global']['hdfs_user']
+user_group = config['configurations']['global']['user_group']
+artifact_dir = "/tmp/HDP-artifacts/"
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+
+start_hiveserver2_path = "/tmp/start_hiveserver2_script"
+start_metastore_path = "/tmp/start_metastore_script"
+
+hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+java64_home = config['configurations']['global']['java64_home']
+
+##### MYSQL
+
+db_name = config['configurations']['global']['hive_database_name']
+mysql_user = "mysql"
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = "/tmp/addMysqlUser.sh"
+
+########## HCAT
+
+hcat_conf_dir = '/etc/hcatalog/conf'
+
+metastore_port = 9933
+hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['global']['hcat_user']
+webhcat_user = config['configurations']['global']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
+
+hadoop_conf_dir = '/etc/hadoop/conf'

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000..111e8a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+from hcat_service_check import hcat_service_check
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
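+    # In secure mode, kinit as the smoke user and extend the HiveServer2 JDBC
+    # URL with Kerberos auth parameters before running the smoke script.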
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
+      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
+      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
+    else:
+      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
+
+    File(params.smoke_test_path,
+         content=StaticFile('hiveserver2Smoke.sh'),
+         mode=0755
+    )
+
+    File(params.smoke_test_sql,
+         content=StaticFile('hiveserver2.sql')
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True,
+            user=params.smokeuser)
+
+    hcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py
new file mode 100644
index 0000000..7770975
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2
new file mode 100644
index 0000000..2a35240
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{hcat_user}}
+METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2
new file mode 100644
index 0000000..548262a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml
new file mode 100644
index 0000000..c49480f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hue_pid_dir</name>
+    <value>/var/run/hue</value>
+    <description>Hue Pid Dir.</description>
+  </property>
+  <property>
+    <name>hue_log_dir</name>
+    <value>/var/log/hue</value>
+    <description>Hue Log Dir.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml
new file mode 100644
index 0000000..6eb52a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml
@@ -0,0 +1,290 @@
+<?xml version="1.0"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <!-- General Hue server configuration properties -->
+  <property>
+      <name>send_debug_messages</name>
+      <value>1</value>
+      <description></description>
+  </property>
+
+  <property>
+    <name>database_logging</name>
+    <value>0</value>
+    <description>To show database transactions, set database_logging to 1.
+      default, database_logging=0</description>
+  </property>
+
+  <property>
+    <name>secret_key</name>
+    <value></value>
+    <description>This is used for secure hashing in the session store.</description>
+  </property>
+
+  <property>
+    <name>http_host</name>
+    <value>0.0.0.0</value>
+    <description>Webserver listens on this address and port</description>
+  </property>
+
+  <property>
+    <name>http_port</name>
+    <value>8000</value>
+    <description>Webserver listens on this address and port</description>
+  </property>
+
+  <property>
+    <name>time_zone</name>
+    <value>America/Los_Angeles</value>
+    <description>Time zone name</description>
+  </property>
+
+  <property>
+    <name>django_debug_mode</name>
+    <value>1</value>
+    <description>Turn off debug</description>
+  </property>
+
+  <property>
+    <name>use_cherrypy_server</name>
+    <value>false</value>
+    <description>Set to true to use CherryPy as the webserver, set to false
+      to use Spawning as the webserver. Defaults to Spawning if
+      key is not specified.</description>
+  </property>
+
+  <property>
+    <name>http_500_debug_mode</name>
+    <value>1</value>
+    <description>Turn off backtrace for server error</description>
+  </property>
+
+  <property>
+    <name>server_user</name>
+    <value></value>
+    <description>Webserver runs as this user</description>
+  </property>
+
+  <property>
+    <name>server_group</name>
+    <value></value>
+    <description>Webserver runs as this user</description>
+  </property>
+
+  <property>
+    <name>backend_auth_policy</name>
+    <value>desktop.auth.backend.AllowAllBackend</value>
+    <description>Authentication backend.</description>
+  </property>
+
+  <!-- Hue Database configuration properties -->
+  <property>
+    <name>db_engine</name>
+    <value>mysql</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_host</name>
+    <value>localhost</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_port</name>
+    <value>3306</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_user</name>
+    <value>sandbox</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_password</name>
+    <value>1111</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <property>
+    <name>db_name</name>
+    <value>sandbox</value>
+    <description>Configuration options for specifying the Desktop Database.</description>
+  </property>
+
+  <!-- Hue Email configuration properties -->
+  <property>
+    <name>smtp_host</name>
+    <value>localhost</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>smtp_port</name>
+    <value>25</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>smtp_user</name>
+    <value></value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>smtp_password</name>
+    <value>25</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>tls</name>
+    <value>no</value>
+    <description>Whether to use a TLS (secure) connection when talking to the SMTP server.</description>
+  </property>
+
+  <property>
+    <name>default_from_email</name>
+    <value>sandbox@hortonworks.com</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <!-- Hue Hadoop configuration properties -->
+  <property>
+    <name>fs_defaultfs</name>
+    <value></value>
+    <description>Enter the filesystem uri. E.g
+      .:hdfs://sandbox:8020</description>
+  </property>
+
+  <property>
+    <name>webhdfs_url</name>
+    <value></value>
+    <description>Use WebHdfs/HttpFs as the communication mechanism. To fallback to
+      using the Thrift plugin (used in Hue 1.x), this must be uncommented
+      and explicitly set to the empty value.
+      Value e.g.: http://localhost:50070/webhdfs/v1/</description>
+  </property>
+
+  <property>
+    <name>jobtracker_host</name>
+    <value></value>
+    <description>Enter the host on which you are running the Hadoop JobTracker.</description>
+  </property>
+
+  <property>
+    <name>jobtracker_port</name>
+    <value>50030</value>
+    <description>The port where the JobTracker IPC listens on.</description>
+  </property>
+
+  <property>
+    <name>hadoop_mapred_home</name>
+    <value>/usr/lib/hadoop/lib</value>
+    <description>The SMTP server information for email notification delivery.</description>
+  </property>
+
+  <property>
+    <name>resourcemanager_host</name>
+    <value></value>
+    <description>Enter the host on which you are running the ResourceManager.</description>
+  </property>
+
+  <property>
+    <name>resourcemanager_port</name>
+    <value></value>
+    <description>The port where the ResourceManager IPC listens on.</description>
+  </property>
+
+  <!-- Hue Beeswax configuration properties -->
+  <property>
+    <name>hive_home_dir</name>
+    <value></value>
+    <description>Hive home directory.</description>
+  </property>
+
+  <property>
+    <name>hive_conf_dir</name>
+    <value></value>
+    <description>Hive configuration directory, where hive-site.xml is
+      located.</description>
+  </property>
+
+  <property>
+    <name>templeton_url</name>
+    <value></value>
+    <description>WebHcat http URL</description>
+  </property>
+
+  <!-- Hue shell types configuration -->
+  <property>
+    <name>pig_nice_name</name>
+    <value></value>
+    <description>Define and configure a new shell type pig</description>
+  </property>
+
+  <property>
+    <name>pig_shell_command</name>
+    <value>/usr/bin/pig -l /dev/null</value>
+    <description>Define and configure a new shell type pig.</description>
+  </property>
+
+  <property>
+    <name>pig_java_home</name>
+    <value></value>
+    <description>Define and configure a new shell type pig.</description>
+  </property>
+
+  <property>
+    <name>hbase_nice_name</name>
+    <value>HBase Shell</value>
+    <description>Define and configure a new shell type hbase</description>
+  </property>
+
+  <property>
+    <name>hbase_shell_command</name>
+    <value>/usr/bin/hbase shell</value>
+    <description>Define and configure a new shell type hbase.</description>
+  </property>
+
+  <property>
+    <name>bash_nice_name</name>
+    <value></value>
+    <description>Define and configure a new shell type bash for testing
+      only</description>
+  </property>
+
+  <property>
+    <name>bash_shell_command</name>
+    <value>/bin/bash</value>
+    <description>Define and configure a new shell type bash for testing only
+      .</description>
+  </property>
+
+  <!-- Hue Settings for the User Admin application -->
+  <property>
+    <name>whitelist</name>
+    <value>(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)</value>
+    <description>proxy settings</description>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml
new file mode 100644
index 0000000..ba580ca
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <user>root</user>
+    <comment>Hue is a graphical user interface to operate and develop
+      applications for Apache Hadoop.</comment>
+    <version>2.2.0.1.3.3.0</version>
+
+    <components>
+        <component>
+            <name>HUE_SERVER</name>
+            <category>MASTER</category>
+        </component>
+    </components>
+
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..8034d19
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml
@@ -0,0 +1,195 @@
+<?xml version="1.0"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- This is the configuration file for the resource manager in Hadoop. -->
+<!-- You can configure various scheduling parameters related to queues. -->
+<!-- The properties for a queue follow a naming convention,such as, -->
+<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
+
+<configuration>
+
+  <property>
+    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
+    <value>3000</value>
+    <description>Maximum number of jobs in the system which can be initialized,
+     concurrently, by the CapacityScheduler.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.capacity</name>
+    <value>100</value>
+    <description>Percentage of the number of slots in the cluster that are
+      to be available for jobs in this queue.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
+    <value>-1</value>
+    <description>
+	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
+	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
+	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
+        Default value of -1 implies a queue can use complete capacity of the cluster.
+
+        This property can be used to prevent certain long-running jobs from occupying more than a
+        certain percentage of the cluster, which, in the absence of pre-emption, could affect the
+        capacity guarantees of other queues.
+
+        One important thing to note is that maximum-capacity is a percentage, so the absolute
+        maximum capacity changes with the cluster's capacity: if a large number of nodes or racks
+        is added to the cluster, the maximum capacity in absolute terms increases accordingly.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description> Each queue enforces a limit on the percentage of resources 
+    allocated to a user at any given time, if there is competition for them. 
+    This user limit can vary between a minimum and maximum value. The former
+    depends on the number of users who have submitted jobs, and the latter is
+    set to this property value. For example, suppose the value of this 
+    property is 25. If two users have submitted jobs to a queue, no single 
+    user can use more than 50% of the queue resources. If a third user submits
+    a job, no single user can use more than 33% of the queue resources. With 4 
+    or more users, no user can use more than 25% of the queue's resources. A 
+    value of 100 implies no user limits are imposed. 
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
+    <value>1</value>
+    <description>The multiple of the queue capacity which can be configured to 
+    allow a single user to acquire more slots. 
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
+    <value>200000</value>
+    <description>The maximum number of tasks, across all jobs in the queue, 
+    which can be initialized concurrently. Once the queue's jobs exceed this 
+    limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
+    <value>100000</value>
+    <description>The maximum number of tasks per-user, across all the of the 
+    user's jobs in the queue, which can be initialized concurrently. Once the 
+    user's jobs exceed this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
+    <value>10</value>
+    <description>The multipe of (maximum-system-jobs * queue-capacity) used to 
+    determine the number of jobs which are accepted by the scheduler.  
+    </description>
+  </property>
+
+  <!-- The default configuration settings for the capacity task scheduler -->
+  <!-- The default values would be applied to all the queues which don't have -->
+  <!-- the appropriate property for the particular queue -->
+  <property>
+    <name>mapred.capacity-scheduler.default-supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions by default in a job queue.
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
+    <value>100</value>
+    <description>The percentage of the resources limited to a particular user
+      for the job queue at any given point of time by default.
+    </description>
+  </property>
+
+
+  <property>
+    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
+    <value>1</value>
+    <description>The default multiple of queue-capacity which is used to 
+    determine the amount of slots a single user can consume concurrently.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
+    <value>200000</value>
+    <description>The default maximum number of tasks, across all jobs in the 
+    queue, which can be initialized concurrently. Once the queue's jobs exceed 
+    this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
+    <value>100000</value>
+    <description>The default maximum number of tasks per user, across all of 
+    the user's jobs in the queue, which can be initialized concurrently. Once 
+    the user's jobs exceed this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
+    <value>10</value>
+    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
+    used to determine the number of jobs which are accepted by the scheduler.  
+    </description>
+  </property>
+
+  <!-- Capacity scheduler Job Initialization configuration parameters -->
+  <property>
+    <name>mapred.capacity-scheduler.init-poll-interval</name>
+    <value>5000</value>
+    <description>The interval, in milliseconds, at which the job queues are
+    polled for jobs to initialize.
+    </description>
+  </property>
+  <property>
+    <name>mapred.capacity-scheduler.init-worker-threads</name>
+    <value>5</value>
+    <description>Number of worker threads used by the initialization poller
+    to initialize jobs in a set of queues. If this number equals the number
+    of job queues, each thread initializes jobs in a single queue. If it is
+    smaller, each thread is assigned a set of queues. If it is larger, the
+    number of threads is capped at the number of job queues.
+    </description>
+  </property>
+
+</configuration>
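
The minimum-user-limit-percent and user-limit-factor descriptions above boil
down to a small piece of arithmetic, and init-accept-jobs-factor bounds how
many jobs a queue accepts. A minimal Python sketch of that documented
arithmetic (an illustration of the described behavior, not the scheduler's
source code):

    def user_share_percent(active_users, min_user_limit_percent):
        # Each competing user is capped at max(100/users, the configured
        # minimum); with the default of 100 no per-user limit applies.
        return max(100.0 / active_users, float(min_user_limit_percent))

    def accepted_jobs(max_system_jobs, queue_capacity_percent, accept_factor):
        # init-accept-jobs-factor * (maximum-system-jobs * queue-capacity)
        return int(max_system_jobs * queue_capacity_percent / 100.0 * accept_factor)

    # The worked example from the description (limit of 25):
    for users in (1, 2, 3, 4, 5):
        print("%d users -> %.1f%% each" % (users, user_share_percent(users, 25)))
    # 1 -> 100.0, 2 -> 50.0, 3 -> 33.3, 4 -> 25.0, 5 -> 25.0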

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml
new file mode 100644
index 0000000..3a2af49
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml
new file mode 100644
index 0000000..4633855
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml
@@ -0,0 +1,160 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>jobtracker_host</name>
+    <value></value>
+    <description>JobTracker Host.</description>
+  </property>
+  <property>
+    <name>tasktracker_hosts</name>
+    <value></value>
+    <description>TaskTracker hosts.</description>
+  </property>
+  <property>
+    <name>mapred_local_dir</name>
+    <value>/hadoop/mapred</value>
+    <description>MapRed Local Directories.</description>
+  </property>
+  <property>
+    <name>mapred_system_dir</name>
+    <value>/mapred/system</value>
+    <description>MapRed System Directories.</description>
+  </property>
+  <property>
+    <name>scheduler_name</name>
+    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
+    <description>MapRed Capacity Scheduler.</description>
+  </property>
+  <property>
+    <name>jtnode_opt_newsize</name>
+    <value>200</value>
+    <description>Mem New Size.</description>
+  </property>
+  <property>
+    <name>jtnode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>Max New size.</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>jtnode_heapsize</name>
+    <value>1024</value>
+    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
+  </property>
+  <property>
+    <name>mapred_map_tasks_max</name>
+    <value>4</value>
+    <description>Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker</description>
+  </property>
+  <property>
+    <name>mapred_red_tasks_max</name>
+    <value>2</value>
+    <description>Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker</description>
+  </property>
+  <property>
+    <name>mapred_cluster_map_mem_mb</name>
+    <value>-1</value>
+    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
+  </property>
+  <property>
+    <name>mapred_cluster_red_mem_mb</name>
+    <value>-1</value>
+    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
+  </property>
+  <property>
+    <name>mapred_job_map_mem_mb</name>
+    <value>-1</value>
+    <description>Virtual memory for single Map task</description>
+  </property>
+  <property>
+    <name>mapred_child_java_opts_sz</name>
+    <value>768</value>
+    <description>Java options for the TaskTracker child processes.</description>
+  </property>
+  <property>
+    <name>io_sort_mb</name>
+    <value>200</value>
+    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
+  </property>
+  <property>
+    <name>io_sort_spill_percent</name>
+    <value>0.9</value>
+    <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
+  </property>
+  <property>
+    <name>mapreduce_userlog_retainhours</name>
+    <value>24</value>
+    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
+  </property>
+  <property>
+    <name>maxtasks_per_job</name>
+    <value>-1</value>
+    <description>Maximum number of tasks for a single Job</description>
+  </property>
+  <property>
+    <name>lzo_enabled</name>
+    <value>true</value>
+    <description>LZO compression enabled</description>
+  </property>
+  <property>
+    <name>snappy_enabled</name>
+    <value>true</value>
+    <description>Snappy compression enabled</description>
+  </property>
+  <property>
+    <name>rca_enabled</name>
+    <value>true</value>
+    <description>Enable Job Diagnostics.</description>
+  </property>
+  <property>
+    <name>mapred_hosts_exclude</name>
+    <value></value>
+    <description>Exclude entered hosts</description>
+  </property>
+  <property>
+    <name>mapred_hosts_include</name>
+    <value></value>
+    <description>Include entered hosts</description>
+  </property>
+  <property>
+    <name>mapred_jobstatus_dir</name>
+    <value>/mapred/jobstatus</value>
+    <description>Job Status directory</description>
+  </property>
+  <property>
+    <name>task_controller</name>
+    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
+    <description>Task Controller.</description>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <value>mapred</value>
+    <description>MapReduce User.</description>
+  </property>
+
+</configuration>
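
These global.* values are Ambari-facing knobs rather than Hadoop properties;
the stack scripts pull them out of the command configurations in a params.py,
in the same resource_management style as the HIVE scripts elsewhere in this
patch. A hypothetical sketch of how a few of them might be surfaced
(illustrative, not the params.py from this commit):

    from resource_management import *

    # Command configurations delivered by the Ambari server.
    config = Script.get_config()

    mapred_user = config['configurations']['global']['mapred_user']
    mapred_local_dir = config['configurations']['global']['mapred_local_dir']
    hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
    rca_enabled = config['configurations']['global']['rca_enabled']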

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml
new file mode 100644
index 0000000..ce12380
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+  <property>
+    <name>mapred.queue.default.acl-submit-job</name>
+    <value>*</value>
+  </property>
+
+  <property>
+    <name>mapred.queue.default.acl-administer-jobs</name>
+    <value>*</value>
+  </property>
+
+  <!-- END ACLs -->
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml
new file mode 100644
index 0000000..da3545c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml
@@ -0,0 +1,579 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <!-- i/o properties -->
+
+  <property>
+    <name>io.sort.mb</name>
+    <value>200</value>
+    <description>
+      The total amount of Map-side buffer memory to use while sorting files
+    </description>
+  </property>
+
+  <property>
+    <name>io.sort.record.percent</name>
+    <value>.2</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.spill.percent</name>
+    <value>0.9</value>
+    <description>Percentage of sort buffer used for record collection</description>
+  </property>
+
+  <property>
+    <name>io.sort.factor</name>
+    <value>100</value>
+    <description>No description</description>
+  </property>
+
+  <!-- map/reduce properties -->
+
+  <property>
+    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+    <value>250</value>
+    <description>Normally, this is the amount of time before killing
+      processes, and the recommended default is 5 seconds - a value of
+      5000 here, in milliseconds.  In this case, we are using it solely to
+      blast tasks before killing them, and killing them very quickly
+      (1/4 second) to guarantee that we do not leave VMs around for later
+      jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.handler.count</name>
+    <value>50</value>
+    <description>
+      The number of server threads for the JobTracker. This should be roughly
+      4% of the number of tasktracker nodes.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value>/mapred/system</value>
+    <description>Path on HDFS where the MapReduce framework stores system files</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <!-- cluster variant -->
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.http.address</name>
+    <!-- cluster variant -->
+    <value>localhost:50030</value>
+    <description>JobTracker host and http port address</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <!-- cluster specific -->
+    <name>mapred.local.dir</name>
+    <value>/hadoop/mapred</value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
+  </property>
+
+  <property>
+    <name>mapred.reduce.parallel.copies</name>
+    <value>30</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.map.tasks.maximum</name>
+    <value>4</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.tasktracker.reduce.tasks.maximum</name>
+    <value>2</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>tasktracker.http.threads</name>
+    <value>50</value>
+  </property>
+
+  <property>
+    <name>mapred.map.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some map tasks
+      may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some reduce tasks
+      may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.slowstart.completed.maps</name>
+    <value>0.05</value>
+  </property>
+
+  <property>
+    <name>mapred.inmem.merge.threshold</name>
+    <value>1000</value>
+    <description>The threshold, in terms of the number of files,
+      for the in-memory merge process. When we accumulate the threshold
+      number of files we initiate the in-memory merge and spill to disk.
+      A value of 0 or less indicates that there is no threshold, and the
+      merge instead depends only on the ramfs's memory consumption to trigger.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapred.job.shuffle.input.buffer.percent.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.map.output.compression.codec</name>
+    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>If the map outputs are compressed, how should they be
+      compressed
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to be compressed as SequenceFiles, how
+      should they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
+
+
+  <property>
+    <name>mapred.jobtracker.completeuserjobs.maximum</name>
+    <value>0</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.taskScheduler</name>
+    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.restart.recover</name>
+    <value>false</value>
+    <description>"true" to enable (job) recovery upon restart,
+      "false" to start afresh
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>The percentage of memory- relative to the maximum heap size- to
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
+      is 10 GB.)  If the estimated input size of the reduce is greater than
+      this value, the job is failed. A value of -1 means that there is no limit
+      set. </description>
+  </property>
+
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapred.compress.map.output</name>
+    <value></value>
+  </property>
+
+
+  <property>
+    <name>mapred.task.timeout</name>
+    <value>600000</value>
+    <description>The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
+  </property>
+
+  <property>
+    <name>jetty.connector</name>
+    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.tracker.task-controller</name>
+    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
+    <description>
+      TaskController which is used to launch and manage task execution.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.child.root.logger</name>
+    <value>INFO,TLA</value>
+  </property>
+
+  <property>
+    <name>ambari.mapred.child.java.opts.memory</name>
+    <value>768</value>
+    <description>Memory, in MB, for the Java options of the TaskTracker child processes</description>
+  </property>
+
+  <property>
+    <name>mapred.child.java.opts</name>
+    <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
+    <description>Java options for the TaskTracker child processes</description>
+  </property>
+
+  <property>
+    <name>mapred.cluster.map.memory.mb</name>
+    <value>1536</value>
+    <description>
+      The virtual memory size of a single Map slot in the MapReduce framework
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.cluster.reduce.memory.mb</name>
+    <value>2048</value>
+    <description>
+      The virtual memory size of a single Reduce slot in the MapReduce framework
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.map.memory.mb</name>
+    <value>1536</value>
+    <description>
+      Virtual memory for single Map task
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.memory.mb</name>
+    <value>2048</value>
+    <description>
+      Virtual memory for single Reduce task
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.map.memory.mb</name>
+    <value>6144</value>
+    <description>
+      Upper limit on virtual memory size for a single Map task of any MapReduce job
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.cluster.max.reduce.memory.mb</name>
+    <value>4096</value>
+    <description>
+      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts.exclude</name>
+    <value>/etc/hadoop/conf/mapred.exclude</value>
+    <description>
+      Names a file that contains the list of hosts that
+      should be excluded by the jobtracker.  If the value is empty, no
+      hosts are excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.max.tracker.blacklists</name>
+    <value>16</value>
+    <description>
+      If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value>/etc/hadoop/conf/health_check</value>
+    <description>
+      Absolute path to the node health check script, run periodically to determine node health
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.interval</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.timeout</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <value>false</value>
+    <description>Indicates if persistency of job status information is
+      active or not.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.hours</name>
+    <value>1</value>
+    <description>The number of hours job status information is persisted in DFS.
+      The job status information will be available after it drops off the memory
+      queue and between jobtracker restarts. With a zero value the job status
+      information is not persisted at all in DFS.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <value>/etc/hadoop/conf/health_check</value>
+    <description>The directory where the job status information is persisted
+      in a file system to be available after it drops off the memory queue and
+      between jobtracker restarts.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.check</name>
+    <value>10000</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.interval</name>
+    <value>21600000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>/mapred/history/done</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.maxvmem</name>
+    <value></value>
+    <final>true</final>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.maxtasks.per.job</name>
+    <value>-1</value>
+    <final>true</final>
+    <description>The maximum number of tasks for a single job.
+      A value of -1 indicates that there is no maximum.  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapred.userlog.retain.hours</name>
+    <value>24</value>
+    <description>
+      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reuse.jvm.num.tasks</name>
+    <value>1</value>
+    <description>
+      How many tasks to run per JVM. If set to -1, there is no limit.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the JobTracker.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.tasktracker.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the TaskTracker. "_HOST" is replaced by the host name of the task tracker.
+    </description>
+  </property>
+
+
+  <property>
+    <name>hadoop.job.history.user.location</name>
+    <value>none</value>
+    <final>true</final>
+  </property>
+
+
+  <property>
+    <name>mapreduce.jobtracker.keytab.file</name>
+    <value></value>
+    <description>
+      The keytab for the jobtracker principal.
+    </description>
+
+  </property>
+
+  <property>
+    <name>mapreduce.tasktracker.keytab.file</name>
+    <value></value>
+    <description>The filename of the keytab for the task tracker</description>
+  </property>
+
+  <property>
+    <name>mapred.task.tracker.http.address</name>
+    <value></value>
+    <description>Http address for task tracker.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>/user</value>
+    <description>The path prefix for where the staging directories should be placed. The next level is always the user's
+      name. It is a path in the default file system.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.tasktracker.group</name>
+    <value>hadoop</value>
+    <description>The group used for accessing the task controller binary. The mapred user must be a member, and regular users should *not* be members.</description>
+
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
+    <value>50000000</value>
+    <final>true</final>
+    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+      initialization.
+    </description>
+  </property>
+  <property>
+    <name>mapreduce.history.server.embedded</name>
+    <value>false</value>
+    <description>Whether the job history server should be embedded within the
+      JobTracker process</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.history.server.http.address</name>
+    <!-- cluster variant -->
+    <value>localhost:51111</value>
+    <description>Http address of the history server</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.kerberos.principal</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>Job history user name key. (must map to same user as JT
+      user)</description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+    <value>180</value>
+    <description>
+      3-hour sliding window (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+    <value>15</value>
+    <description>
+      15-minute bucket size (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.names</name>
+    <value>default</value>
+    <description> Comma separated list of queues configured for this jobtracker.</description>
+  </property>
+
+</configuration>
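
mapred.child.java.opts above relies on Hadoop Configuration's ${...} variable
expansion: the -Xmx value is spliced in from
ambari.mapred.child.java.opts.memory when the property is read. A small
Python sketch of that expansion (illustrative only; the real substitution
happens inside Hadoop's Configuration.get()):

    import re

    props = {
        "ambari.mapred.child.java.opts.memory": "768",
        "mapred.child.java.opts":
            "-server -Xmx${ambari.mapred.child.java.opts.memory}m "
            "-Djava.net.preferIPv4Stack=true",
    }

    def resolve(name, props):
        # Substitute ${prop} references until none remain.
        value = props[name]
        pattern = re.compile(r"\$\{([^}]+)\}")
        m = pattern.search(value)
        while m:
            value = value[:m.start()] + props[m.group(1)] + value[m.end():]
            m = pattern.search(value)
        return value

    print(resolve("mapred.child.java.opts", props))
    # -server -Xmx768m -Djava.net.preferIPv4Stack=true

The blacklist settings near the end follow similar arithmetic: a 180-minute
fault-timeout window split into 15-minute buckets gives the JobTracker 12
buckets of fault counts per tracker to age out.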

http://git-wip-us.apache.org/repos/asf/ambari/blob/88482360/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..a1ca7ba
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,76 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <comment>Apache Hadoop Distributed Processing Framework</comment>
+      <version>1.2.0.1.3.3.0</version>
+      <components>
+
+        <component>
+          <name>JOBTRACKER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/jobtracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>TASKTRACKER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/tasktracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
+</metainfo>
\ No newline at end of file
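
The metainfo.xml above is what tells Ambari which components this service has
and which Python script drives each one. A quick, hypothetical way to eyeball
that mapping with the standard library (not Ambari's own metainfo loader; the
file path here is an assumption):

    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")
    for component in tree.getroot().iter("component"):
        # Print each component with its category and command script.
        print("%-18s %-7s %s" % (component.findtext("name"),
                                 component.findtext("category"),
                                 component.findtext("commandScript/script")))
    # JOBTRACKER         MASTER  scripts/jobtracker.py
    # TASKTRACKER        SLAVE   scripts/tasktracker.py
    # MAPREDUCE_CLIENT   CLIENT  scripts/client.py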