Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:50:53 UTC

[28/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..8b29cc3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
@@ -0,0 +1,212 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def namenode(action=None, format=True):
+  import params
+  # this directory must be present before any action (HA manual steps for
+  # the additional namenode)
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if format:
+      format_namenode()
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+    service(
+      action="start", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      create_pid_dir=True,
+      create_log_dir=True,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+    # TODO: extract creating of dirs to different services
+    create_app_directories()
+    create_user_directories()
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+  if action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+
+def create_app_directories():
+  import params
+
+  hdfs_directory(name="/tmp",
+                 owner=params.hdfs_user,
+                 mode="777"
+  )
+  #mapred directories
+  if params.has_historyserver:
+    hdfs_directory(name="/mapred",
+                   owner=params.mapred_user
+    )
+    hdfs_directory(name="/mapred/system",
+                   owner=params.hdfs_user
+    )
+  # hbase directories
+  if len(params.hbase_master_hosts) != 0:
+    hdfs_directory(name=params.hbase_hdfs_root_dir,
+                   owner=params.hbase_user
+    )
+    hdfs_directory(name=params.hbase_staging_dir,
+                   owner=params.hbase_user,
+                   mode="711"
+    )
+  # hive directories
+  if len(params.hive_server_host) != 0:
+    hdfs_directory(name=params.hive_apps_whs_dir,
+                   owner=params.hive_user,
+                   mode="777"
+    )
+  if len(params.hcat_server_hosts) != 0:
+    hdfs_directory(name=params.webhcat_apps_dir,
+                   owner=params.webhcat_user,
+                   mode="755"
+    )
+  if len(params.hs_host) != 0:
+    if params.yarn_log_aggregation_enabled:
+      hdfs_directory(name=params.yarn_nm_app_log_dir,
+                     owner=params.yarn_user,
+                     group=params.user_group,
+                     mode="777",
+                     recursive_chmod=True
+      )
+    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="1777"
+    )
+
+  if params.has_falcon_host:
+    if params.falcon_store_uri[0:4] == "hdfs":
+      hdfs_directory(name=params.falcon_store_uri,
+                     owner=params.falcon_user,
+                     mode="755"
+      )
+
+def create_user_directories():
+  import params
+
+  hdfs_directory(name=params.smoke_hdfs_user_dir,
+                 owner=params.smoke_user,
+                 mode=params.smoke_hdfs_user_mode
+  )
+
+  if params.has_hive_server_host:
+    hdfs_directory(name=params.hive_hdfs_user_dir,
+                   owner=params.hive_user,
+                   mode=params.hive_hdfs_user_mode
+    )
+
+  if params.has_hcat_server_host:
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      hdfs_directory(name=params.hcat_hdfs_user_dir,
+                     owner=params.hcat_user,
+                     mode=params.hcat_hdfs_user_mode
+      )
+    hdfs_directory(name=params.webhcat_hdfs_user_dir,
+                   owner=params.webhcat_user,
+                   mode=params.webhcat_hdfs_user_mode
+    )
+
+  if params.has_oozie_server:
+    hdfs_directory(name=params.oozie_hdfs_user_dir,
+                   owner=params.oozie_user,
+                   mode=params.oozie_hdfs_user_mode
+    )
+
+
+def format_namenode(force=None):
+  import params
+
+  mark_dir = params.namenode_formatted_mark_dir
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if not params.dfs_ha_enabled:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    kinit_override=True)
+    else:
+      File('/tmp/checkForFormat.sh',
+           content=StaticFile("checkForFormat.sh"),
+           mode=0755)
+      Execute(format(
+        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
+        "{dfs_name_dir}"),
+              not_if=format("test -d {mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
+    Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+  user_group = params.user_group
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=user_group
+  )
+
+  ExecuteHadoop('dfsadmin -refreshNodes',
+                user=hdfs_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
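
A note on a pattern used throughout these scripts: format() is resource_management's templating helper, which fills {name} placeholders from the caller's local variables (and the params module), so strings such as "test -d {mark_dir}" take no explicit arguments. A rough standalone approximation, not the library's actual implementation:

    import inspect

    def format_like(pattern, **extra):
        # Approximates resource_management's format(): fills {name}
        # placeholders from the caller's local variables, plus overrides.
        variables = dict(inspect.currentframe().f_back.f_locals)
        variables.update(extra)
        return pattern.format(**variables)

    mark_dir = "/var/run/hadoop/hdfs/namenode/formatted"  # hypothetical value
    print(format_like("test -d {mark_dir}"))
    # -> test -d /var/run/hadoop/hdfs/namenode/formatted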

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..a943455
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    Directory(params.fs_checkpoint_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+  elif action in ("start", "stop"):
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
new file mode 100644
index 0000000..f2134d5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class JournalNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    service(
+      action="start", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def configure(self, env):
+    import params
+
+    Directory(params.jn_edits_dir,
+              recursive=True,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.journalnode_pid_file)
+
+
+if __name__ == "__main__":
+  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..deb01d5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_namenode import namenode
+
+
+class NameNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    #TODO: remove when the config action is implemented
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    namenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="decommission")
+
+if __name__ == "__main__":
+  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
new file mode 100644
index 0000000..99fdc70
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -0,0 +1,193 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+import os
+
+config = Script.get_config()
+
+if System.get_instance().os_type == "oraclelinux":
+  ulimit_cmd = ''
+else:
+  ulimit_cmd = "ulimit -c unlimited; "
+
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file = config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+falcon_user = config['configurations']['global']['falcon_user']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host = default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+
+has_ganglia_server = len(ganglia_server_hosts) > 0
+has_namenodes = len(namenode_host) > 0
+has_jobtracker = len(jtnode_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_historyserver = len(hs_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_slaves = len(slave_hosts) > 0
+has_nagios = len(nagios_server_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_hcat_server_host = len(hcat_server_hosts) > 0
+has_hive_server_host = len(hive_server_host) > 0
+has_journalnode_hosts = len(journalnode_hosts) > 0
+has_zkfc_hosts = len(zkfc_hosts) > 0
+has_falcon_host = len(falcon_host) > 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user = config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = status_params.hdfs_user
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group = config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group = "users"
+
+#hadoop params
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+hadoop_bin = "/usr/lib/hadoop/sbin"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+
+dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
+
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_staging_dir = "/apps/hbase/staging"
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
+webhcat_apps_dir = "/apps/webhcat"
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
+yarn_nm_app_log_dir = config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
+
+if has_oozie_server:
+  oozie_hdfs_user_dir = format("/user/{oozie_user}")
+  oozie_hdfs_user_mode = 775
+if has_hcat_server_host:
+  hcat_hdfs_user_dir = format("/user/{hcat_user}")
+  hcat_hdfs_user_mode = 755
+  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+  webhcat_hdfs_user_mode = 755
+if has_hive_server_host:
+  hive_hdfs_user_dir = format("/user/{hive_user}")
+  hive_hdfs_user_mode = 700
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 770
+
+namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
+
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
+
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+namenode_id = None  # set below when this host matches one of the HA namenodes
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+  if len(dfs_ha_namenode_ids_list) > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
+if journalnode_address:
+  journalnode_port = journalnode_address.split(":")[1]
+
+falcon_store_uri = config['configurations']['global']['falcon_store_uri']
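
The HA block above derives namenode_id by matching this host against each NameNode's RPC address; namenode_id must be initialized before the loop so a match is not thrown away afterwards. A minimal standalone sketch of that selection, with hypothetical host names standing in for Ambari-supplied config:

    # Hypothetical stand-ins for Ambari-provided configuration.
    hostname = "nn2.example.com"
    ha_namenode_ids = "nn1,nn2"
    rpc_addresses = {
        "nn1": "nn1.example.com:8020",
        "nn2": "nn2.example.com:8020",
    }

    namenode_id = None  # initialized before the loop so a match is kept
    if len(ha_namenode_ids.split(",")) > 1:
        for nn_id in ha_namenode_ids.split(","):
            if hostname in rpc_addresses[nn_id]:
                namenode_id = nn_id

    print(namenode_id)  # -> nn2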

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..d27b13a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,107 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = get_unique_id_and_date()
+    dir = '/tmp'
+    tmp_file = format("{dir}/{unique}")
+
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
+    test_dir_exists = format("hadoop fs -test -e {dir}")
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup is prepended to handle retries: a retry would otherwise hit a
+    #stale file that needs cleanup; the exit code is that of the second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
+    if params.security_enabled:
+      Execute(format(
+        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "{smoke_user}'"))
+    ExecuteHadoop(safemode_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      smoke_test_user = params.smoke_user
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format(
+        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName))
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = format(
+          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()
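
The zkfc liveness test above is the common pid-file idiom: the daemon counts as running if its pid file exists and the recorded pid maps to a live process. Roughly the same check in plain Python (a sketch, not the resource_management implementation):

    import os

    def process_alive(pid_file):
        # True if pid_file exists and the recorded pid maps to a live process.
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False
        try:
            os.kill(pid, 0)  # signal 0 probes existence without killing
            return True
        except OSError:
            return False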

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..8f682ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_snamenode import snamenode
+
+
+class SNameNode(Script):
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.config(env)
+    snamenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
new file mode 100644
index 0000000..4097373
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['global']['hdfs_user']
+hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
new file mode 100644
index 0000000..017a47a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
@@ -0,0 +1,138 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def service(action=None, name=None, user=None, create_pid_dir=False,
+            create_log_dir=False, keytab=None, principal=None):
+  import params
+
+  kinit_cmd = "true"  # no-op unless a Kerberos principal is supplied below
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+  hadoop_daemon = format(
+    "{ulimit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  if create_pid_dir:
+    Directory(pid_dir,
+              owner=user,
+              recursive=True)
+  if create_log_dir:
+    Directory(log_dir,
+              owner=user,
+              recursive=True)
+
+  if params.security_enabled and principal:
+    principal_replaced = principal.replace("_HOST", params.hostname)
+    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
+
+    if name == "datanode":
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+  daemon_cmd = format("{cmd} {action} {name}")
+
+  service_is_up = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
+
+  Execute(kinit_cmd)
+  Execute(daemon_cmd,
+          user = user,
+          not_if=service_is_up
+  )
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+         ignore_failures=True
+    )
+
+
+def hdfs_directory(name=None, owner=None, group=None,
+                   mode=None, recursive_chown=False, recursive_chmod=False):
+  import params
+
+  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
+  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
+
+  stub_dir = params.namenode_dirs_created_stub_dir
+  stub_filename = params.namenode_dirs_stub_filename
+  dir_absent_in_stub = format(
+    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
+  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
+  tries = 30
+  try_sleep = 10
+  dfs_check_nn_status_cmd = "true"
+
+  if params.dfs_ha_enabled:
+    namenode_id = params.namenode_id
+    dfs_check_nn_status_cmd = format(
+      "hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null")
+
+  mkdir_cmd = format("fs -mkdir -p {name}")
+
+  if params.security_enabled:
+    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
+            user = params.hdfs_user)
+  ExecuteHadoop(mkdir_cmd,
+                try_sleep=try_sleep,
+                tries=tries,
+                not_if=format(
+                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "{dir_exists} && ! {namenode_safe_mode_off}"),
+                only_if=format(
+                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "! {dir_exists}"),
+                conf_dir=params.hadoop_conf_dir,
+                user=params.hdfs_user
+  )
+  Execute(record_dir_in_stub,
+          user=params.hdfs_user,
+          only_if=format("{dir_absent_in_stub}")
+  )
+
+  chown_recursive = "-R" if recursive_chown else ""
+  chmod_recursive = "-R" if recursive_chmod else ""
+  perm_cmds = []
+
+  if owner:
+    chown = owner
+    if group:
+      chown = format("{owner}:{group}")
+    perm_cmds.append(format("fs -chown {chown_recursive} {chown} {name}"))
+  if mode:
+    perm_cmds.append(format("fs -chmod {chmod_recursive} {mode} {name}"))
+  for cmd in perm_cmds:
+    ExecuteHadoop(cmd,
+                  user=params.hdfs_user,
+                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
+                  try_sleep=try_sleep,
+                  tries=tries,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+
+
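
The not_if/only_if guards in hdfs_directory make directory creation idempotent: a stub file records every HDFS directory already created, so re-runs skip the expensive HDFS round-trips. The two stub operations, sketched in plain Python (the stub path shown is hypothetical):

    def dir_absent_in_stub(name, stub_path="/var/log/hadoop/hdfs/namenode_dirs_created"):
        # Mirrors: grep -q '^{name}$' {stub}; test $? -ne 0
        try:
            with open(stub_path) as f:
                return all(line.strip() != name for line in f)
        except IOError:
            return True  # no stub yet: nothing recorded as created

    def record_dir_in_stub(name, stub_path="/var/log/hadoop/hdfs/namenode_dirs_created"):
        # Mirrors: echo '{name}' >> {stub}
        with open(stub_path, "a") as f:
            f.write(name + "\n")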

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..1f9ba65
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,62 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class ZkfcSlave(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    service(
+      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def config(self, env):
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.zkfc_pid_file)
+
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
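
The template renders one excluded host per line from the hdfs_exclude_file list. For illustration, the same rendering done directly with the jinja2 library (host names are made up):

    from jinja2 import Template

    tmpl = Template(
        "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}")
    print(tmpl.render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]))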

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
new file mode 100644
index 0000000..b0f5268
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>INFO,FA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>${hive.query.id}.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA</name>
+    <value>org.apache.log4j.FileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.FA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,FA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,FA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
new file mode 100644
index 0000000..e92c3e8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
@@ -0,0 +1,130 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>hive.log.threshold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>hive.root.logger</name>
+    <value>WARN,DRFA</value>
+  </property>
+  <property>
+    <name>hive.log.dir</name>
+    <value>/tmp/${user.name}</value>
+  </property>
+  <property>
+    <name>hive.log.file</name>
+    <value>hive.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hive.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshold</name>
+    <value>${hive.log.threshold}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hive.log.dir}/${hive.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.encoding</name>
+    <value>UTF-8</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.hive.shims.HiveEventCounter</value>
+  </property>
+  <property>
+    <name>log4j.category.DataNucleus</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.Datastore.Schema</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Datastore</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Plugin</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.MetaData</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Query</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.General</name>
+    <value>ERROR,DRFA</value>
+  </property>
+  <property>
+    <name>log4j.category.JPOX.Enhancer</name>
+    <value>ERROR,DRFA</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
index 899cfe9..bfdc8ac 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
@@ -183,7 +183,7 @@ limitations under the License.
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
     <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-      of buckets, a sort-merge join can be performed by setting this parameter as true.
+    of buckets, a sort-merge join can be performed by setting this parameter as true.
     </description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
index f13193f..3e89601 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
@@ -16,30 +16,143 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.12.0.2.0.6.1</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.12.0.2.1.1</version>
+      <components>
 
-    <components>        
         <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
+          <name>HIVE_METASTORE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
+
         <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
+          <name>HIVE_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+
+        <component>
+          <name>MYSQL_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
         <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
+          <name>HIVE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>HCATALOG</name>
+      <comment>Table and storage management service for Hadoop (HCatalog)</comment>
+      <version>0.12.0.2.0.6.0</version>
+      <components>
         <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HCAT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hive-site</config-type>
-    </configuration-dependencies>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+    </service>
+
+  </services>
 </metainfo>
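
The commandScript entries above each resolve to a Script subclass; the pattern
is visible in the component scripts later in this commit (hcat_client.py,
hive_metastore.py). For orientation, a minimal sketch of a component script
under this convention; the class name below is illustrative, not the actual
mysql_server.py from this patch:

    from resource_management import *

    class ExampleComponent(Script):
      def install(self, env):
        self.install_packages(env)  # installs packages listed in <osSpecifics>
        self.configure(env)

      def configure(self, env):
        import params
        env.set_params(params)      # expose params to format() lookups

      def status(self, env):
        import status_params
        env.set_params(status_params)

    if __name__ == "__main__":
      ExampleComponent().execute()  # dispatches install/start/stop/status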

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
new file mode 100644
index 0000000..8d31b91
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+mysqldbhost=$4
+myhostname=$(hostname -f)
+
+service $mysqldservice start
+echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
+if ! mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -q "$mysqldbuser"; then
+  echo "Adding user $mysqldbuser@$myhostname";
+  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
+  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
+fi
+mysql -u root -e "flush privileges;"
+service $mysqldservice stop
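
addMysqlUser.sh takes four positional arguments: the mysqld service name, the
database user, the password, and the database host. A minimal sketch of the
calling side under this commit's conventions; the param names (daemon_name,
db_user, db_password, db_host) are assumptions for illustration:

    from resource_management import *

    def add_mysql_user_sketch():
      import params  # assumed to define the names used below

      # Stage the helper on the agent host, as the smoke tests below do.
      File('/tmp/addMysqlUser.sh',
           content=StaticFile("addMysqlUser.sh"),
           mode=0755
      )

      # $1=mysqld service, $2=db user, $3=password, $4=db host
      Execute(format("sh /tmp/addMysqlUser.sh {daemon_name} {db_user} {db_password} {db_host}"),
              tries=3,
              try_sleep=5,
              path=['/bin', '/usr/bin']
      )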

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
new file mode 100644
index 0000000..9e7b33f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
new file mode 100644
index 0000000..7e03524
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..051a21e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi
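
hiveserver2Smoke.sh expects a HiveServer2 JDBC URL in $1 and a SQL file for
beeline's !run in $2 (the hiveserver2.sql above). A sketch of a plausible call
site; deriving the URL from hive-site values is an assumption here:

    from resource_management import *

    def hiveserver2_smoke_sketch():
      import params

      File('/tmp/hiveserver2Smoke.sh',
           content=StaticFile("hiveserver2Smoke.sh"),
           mode=0755
      )
      File('/tmp/hiveserver2.sql',
           content=StaticFile("hiveserver2.sql")
      )

      # Assumed params: hive_server_host, hive_server_port.
      jdbc_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
      Execute(format("sh /tmp/hiveserver2Smoke.sh {jdbc_url} /tmp/hiveserver2.sql"),
              user=params.smokeuser,
              tries=3,
              try_sleep=5,
              logoutput=True
      )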

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate $0 as id;
+store B into 'pigsmoke.out';
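
Despite the .sh extension, pigSmoke.sh is Pig Latin, not shell, and is meant to
be handed to the pig launcher. A sketch of the staging-and-run step in the same
style as the other smoke tests (seeding 'passwd' from /etc/passwd is an
assumption):

    from resource_management import *

    def pig_smoke_sketch():
      import params

      File('/tmp/pigSmoke.sh',
           content=StaticFile("pigSmoke.sh"),
           mode=0755
      )

      # The script loads 'passwd' from the smoke user's HDFS home directory.
      ExecuteHadoop("fs -copyFromLocal /etc/passwd passwd",
                    user=params.smokeuser,
                    conf_dir=params.hadoop_conf_dir
      )

      Execute("pig /tmp/pigSmoke.sh",
              user=params.smokeuser,
              path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
              logoutput=True
      )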

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh
new file mode 100644
index 0000000..fa90c2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startHiveserver2.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
+echo $! > $3

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
new file mode 100644
index 0000000..9350776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
+echo $! > $3
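
Both start scripts share one positional contract: $1 is the stdout log, $2 the
stderr log, $3 the pid file, and $4 the config directory exported as
HIVE_CONF_DIR. A sketch of how the hive_service helper imported by
hive_metastore.py below plausibly composes the call (names assumed where not
shown in this excerpt):

    from resource_management import *

    def start_metastore_sketch():
      import params

      pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
      # $1=stdout log, $2=stderr log, $3=pid file, $4=HIVE_CONF_DIR
      cmd = format("{start_metastore_path} {hive_log_dir}/hive.out "
                   "{hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir}")

      Execute(cmd,
              user=params.hive_user,
              # Skip the start if a live process already owns the pid file.
              not_if=format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
      )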

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
new file mode 100644
index 0000000..2993d3a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hcat():
+  import params
+
+  Directory(params.hcat_conf_dir,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            recursive=True
+  )
+
+  hcat_TemplateConfig('hcat-env.sh')
+
+
+def hcat_TemplateConfig(name):
+  import params
+
+  TemplateConfig(format("{hcat_conf_dir}/{name}"),
+                 owner=params.hcat_user,
+                 group=params.user_group
+  )
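
TemplateConfig derives the template name from the managed file's basename, so
the call above renders hcat-env.sh from the package's templates/hcat-env.sh.j2.
The explicit equivalent, in the style hive.py uses below for hive-env.sh, would
look roughly like this (sketch):

    File(format("{hcat_conf_dir}/hcat-env.sh"),
         owner=params.hcat_user,
         group=params.user_group,
         content=Template("hcat-env.sh.j2")
    )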

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
new file mode 100644
index 0000000..8b5921a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+
+class HCatClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+
+    hcat()
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..bc75e44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+
+def hcat_service_check():
+    import params
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File('/tmp/hcatSmoke.sh',
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir)
+
+    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True
+    )
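
hcat_service_check() is a plain function; the metainfo above routes the
HCATALOG service check to scripts/service_check.py. A sketch of the Script
wrapper that would call it (the class and file name are assumptions; only the
function itself comes from this patch):

    from resource_management import *
    from hcat_service_check import hcat_service_check

    class HCatServiceCheck(Script):
      def service_check(self, env):
        import params
        env.set_params(params)
        hcat_service_check()

    if __name__ == "__main__":
      HCatServiceCheck().execute()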

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000..44e6a77
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+def hive(name=None):
+  import params
+
+  if name == 'metastore' or name == 'hiveserver2':
+    hive_config_dir = params.hive_server_conf_dir
+    config_file_mode = 0600
+    jdbc_connector()
+  else:
+    hive_config_dir = params.hive_conf_dir
+    config_file_mode = 0644
+
+  Directory(hive_config_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=config_file_mode
+  )
+
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar_name}]"))
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=StaticFile('startHiveserver2.sh')
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+  )
+
+  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
+
+  log4j_exec_filename = 'hive-exec-log4j.properties'
+  if params.log4j_exec_props is not None:
+    PropertiesFile(log4j_exec_filename,
+                   dir=params.hive_conf_dir,
+                   properties=params.log4j_exec_props,
+                   mode=0664,
+                   owner=params.hive_user,
+                   group=params.user_group
+    )
+  elif (os.path.exists("{params.hive_conf_dir}/{log4j_exec_filename}.template")):
+    File(format("{params.hive_conf_dir}/{log4j_exec_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{params.hive_conf_dir}/{log4j_exec_filename}.template"))
+    )
+
+  log4j_filename = 'hive-log4j.properties'
+  if params.log4j_props is not None:
+    PropertiesFile(log4j_filename,
+                   dir=params.hive_conf_dir,
+                   properties=params.log4j_props,
+                   mode=0664,
+                   owner=params.hive_user,
+                   group=params.user_group
+    )
+  elif (os.path.exists("{params.hive_conf_dir}/{log4j_filename}.template")):
+    File(format("{params.hive_conf_dir}/{log4j_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{params.hive_conf_dir}/{log4j_filename}.template"))
+    )
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    cmd = format(
+      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..0a5fb2b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..c741174
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Verify the metastore process recorded in the pid file is alive
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()